1 Carga de paquetes

Comenzamos la tarea cargando los paquetes que serán necesarios.

# Load required packages.
# Fix: forecast, tseries, gets and ggplot2 were each loaded twice in the
# original list; duplicates removed and packages grouped by purpose.

# Data download and return/performance handling
library(quantmod)
library(PerformanceAnalytics)

# Plotting
library(dygraphs)
library(ggplot2)
library(plotly)

# Time-series modelling
library(forecast)
library(astsa)
library(tseries)
library(tsoutliers)
library(zoo)

# Volatility / ARCH-GARCH
library(FinTS)
library(rugarch)

# Regression and inference
library(gets)
library(car)
library(dynlm)
library(dyn)
library(lmtest)
library(sandwich)

# Unit roots, cointegration and VAR
library(urca)
library(cointReg)
library(vars)

# Parallel backend for rolling estimation
library(parallel)

2 Descarga y manejo de datos

Descargamos el índice EUROSTOXX50 y 3 valores que lo componen:

# Download the EUROSTOXX50 index and three of its constituents
# (ASML, Bayer, BNP Paribas) from Yahoo Finance, starting 2005-01-03.
getSymbols(c("^STOXX50E","ASML.AS", "BAYN.DE", "BNP.PA"), from= '2005-01-03' )
## [1] "STOXX50E" "ASML.AS"  "BAYN.DE"  "BNP.PA"
# Download the interest rate: 3-month US Treasury Bill (series DTB3) from FRED.
# NOTE(review): getSymbols with src = "FRED" ignores `from` and returns the
# full history — confirm the extra observations are harmless downstream.
getSymbols("DTB3", src = "FRED", from= '2005-01-03' )
## [1] "DTB3"

Convertimos a frecuencia diaria el tipo anualizado de las Letras del Tesoro de EE. UU. a 3 meses (serie DTB3), dividiendo entre 253 días de negociación:

DTB3 <- DTB3/(253)

Seleccionamos los datos ajustados y los unimos con merge en caso de realizar tratamiento de datos conjunto:

# Keep only the adjusted close of each series and merge them by date
# into a single xts object for joint processing.
fdata <- merge(STOXX50E$STOXX50E.Adjusted, ASML.AS$ASML.AS.Adjusted, BAYN.DE$BAYN.DE.Adjusted,BNP.PA$BNP.PA.Adjusted)

# Shorter column names.
names(fdata) <- c("EUROSTOX", "ASML", "BAYN", "BNP")

Por el otro lado, extraemos los datos por si fuera necesario tratamiento individual.

# Pull each price series out of the merged object so it can be
# treated individually (column selection keeps the xts class).
EUROSTOX <- fdata[, "EUROSTOX"]
ASML     <- fdata[, "ASML"]
BAYN     <- fdata[, "BAYN"]
BNP      <- fdata[, "BNP"]

Veamos la representación gráfica de los valores obtenidos:

# Interactive price charts with a range selector initially focused on
# 2008-01-01 .. 2017-04-01.
dygraph(EUROSTOX, main = "EUROSTOX50") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(ASML, main = "ASML Holding N.V.") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(BAYN, main = "Bayer AG") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(BNP, main = "BNP Paribas S.A.") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

2.1 Cálculo de los retornos como porcentaje

# Daily log-returns in percent (the first observation becomes NA).
rdata <- Return.calculate(fdata, method = "log")*100

# Append the daily risk-free rate and drop any row with an NA,
# which also removes the leading NA return row.
rdata <- na.omit(merge(rdata, DTB3))

head(rdata)
##               EUROSTOX       ASML        BAYN        BNP        DTB3
## 2005-01-04  0.03702993 -1.8722247 -1.44514257  0.6495717 0.009090909
## 2005-01-05 -0.80868121 -2.8763703 -2.58036866 -0.5566617 0.009051383
## 2005-01-06  0.64429835 -0.1769240  0.04392226  1.4776002 0.008972332
## 2005-01-07  0.45643826 -0.4434562 -0.29134506  1.1845145 0.009051383
## 2005-01-10 -0.08729197  1.4130242  0.32976122  0.1810193 0.009209486
## 2005-01-11 -0.94221569 -1.8591429 -0.95377959 -0.3623669 0.009130435

Extraemos los datos:

# Extract the individual return series.
rESX<- rdata$EUROSTOX
rASML <- rdata$ASML
rBAYN <- rdata$BAYN
rBNP <- rdata$BNP
# NOTE: this overwrites the original DTB3 object with the NA-filtered
# daily-rate column, now aligned with the return dates.
DTB3 <- rdata$DTB3

Calculamos los “clean data” para los valores:

# Outlier-cleaned return series ("boudt" robust cleaning method).
# NOTE(review): the cleaned series do not appear to be used later in the
# visible code — confirm whether they are needed.
rESXc <- Return.clean(rESX, method = "boudt")
rASMLc <- Return.clean(rASML, method = "boudt")
rBAYNc <- Return.clean(rBAYN, method = "boudt")
rBNPc <- Return.clean(rBNP, method = "boudt")

Veamos los retornos:

# Interactive charts of the daily return series.
dygraph(rESX, main = "Rendimientos de índice EUROSTOX50") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(rASML, main = "Rendimientos de ASML Holding N.V.") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(rBAYN, main = "Rendimientos de Bayer AG") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(rBNP, main = "Rendimientos de BNP Paribas S.A.") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

3 Apartado 1

En primer lugar vamos a verificar si alguno de los retornos de los activos tiene alguna estructura:

fitASML <- auto.arima(rASML, trace = TRUE, test = "kpss", ic="bic")
## 
##  ARIMA(2,0,2) with non-zero mean : 12354.17
##  ARIMA(0,0,0) with non-zero mean : 12331.67
##  ARIMA(1,0,0) with non-zero mean : 12337.19
##  ARIMA(0,0,1) with non-zero mean : 12336.85
##  ARIMA(0,0,0) with zero mean     : 12325.78
##  ARIMA(1,0,1) with non-zero mean : 12337.56
## 
##  Best model: ARIMA(0,0,0) with non-zero mean
fitBAYN <- auto.arima(rBAYN, trace = TRUE, test = "kpss", ic="bic")
## 
##  ARIMA(2,0,2) with non-zero mean : 11536.73
##  ARIMA(0,0,0) with non-zero mean : 11524.08
##  ARIMA(1,0,0) with non-zero mean : 11518.58
##  ARIMA(0,0,1) with non-zero mean : 11517.48
##  ARIMA(0,0,0) with zero mean     : 11518.02
##  ARIMA(1,0,1) with non-zero mean : 11521.54
##  ARIMA(0,0,2) with non-zero mean : 11523.64
##  ARIMA(1,0,2) with non-zero mean : 11528.65
##  ARIMA(0,0,1) with zero mean     : 11511.74
##  ARIMA(1,0,1) with zero mean     : 11516.29
##  ARIMA(0,0,2) with zero mean     : 11518.03
##  ARIMA(1,0,2) with zero mean     : 11523.57
## 
##  Best model: ARIMA(0,0,1) with zero mean
fitBNP <- auto.arima(rBNP, trace = TRUE, test = "kpss", ic="bic")
## 
##  ARIMA(2,0,2) with non-zero mean : 13446.54
##  ARIMA(0,0,0) with non-zero mean : 13427.1
##  ARIMA(1,0,0) with non-zero mean : 13435.52
##  ARIMA(0,0,1) with non-zero mean : 13434.59
##  ARIMA(0,0,0) with zero mean     : 13419.2
##  ARIMA(1,0,1) with non-zero mean : 13443.48
## 
##  Best model: ARIMA(0,0,0) with non-zero mean

Como podemos verificar, ni ASML ni BNP poseen estructura ARMA (el mejor modelo es un ruido blanco), mientras que para BAYN el mejor modelo es un MA(1) con media cero. El siguiente punto será verificar si existe efecto ARCH en los rendimientos. Para ello utilizaremos los residuos al cuadrado de los modelos ajustados.

Veamos el contraste de Ljung-Box:

Box.test(fitASML$residuals^2, lag=12, type = "Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  fitASML$residuals^2
## X-squared = 19.398, df = 12, p-value = 0.07938
Box.test(fitBAYN$residuals^2, lag=12, type = "Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  fitBAYN$residuals^2
## X-squared = 63.662, df = 12, p-value = 4.814e-09
Box.test(fitBNP$residuals^2, lag=12, type = "Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  fitBNP$residuals^2
## X-squared = 889.12, df = 12, p-value < 2.2e-16

Recordemos que el contraste de Ljung-Box sobre los residuos al cuadrado enfrenta la hipótesis nula de ausencia de autocorrelación (y, por tanto, de efectos ARCH) frente a la alternativa de su existencia. Para BAYN y BNP los p-valores son claramente inferiores al 5%, por lo que rechazamos la hipótesis nula y podemos intuir que nos enfrentamos a efectos ARCH; para ASML, en cambio, el p-valor (0.079) solo permite rechazar al 10% de significación.

Vamos a especificar el modelo GARCH(1,1) con el modelo de la media (0,0), ya que son los resultados que obtuvimos en el ajuste de ARIMA.

Veamos la especificación:

# GARCH(1,1) specification with a constant-only (ARMA(0,0)) mean equation,
# consistent with the auto.arima results above.
garch11.spec <- ugarchspec(variance.model = list(garchOrder=c(1,1)),
                           mean.model = list(armaOrder=c(0,0)))

Especifiquemos los modelos arch de orden <=5 para cada activo:

# Fit pure ARCH(p) models, p = 1..5, with a constant-only mean, to each
# asset's returns. The results are collected in one list per asset,
# indexed by the ARCH order.
arch.order <- 1:5
arch.names <- paste("arch", arch.order, sep=".")

# Helper: fit an ARCH(p) (garchOrder = c(p, 0)) to a return series.
fit_arch <- function(p, series) {
  spec <- ugarchspec(variance.model = list(garchOrder = c(p, 0)),
                     mean.model = list(armaOrder = c(0, 0)))
  ugarchfit(spec = spec, data = series,
            solver.control = list(trace = 0))
}

arch.listASML <- lapply(arch.order, fit_arch, series = rASML)
arch.listBAYN <- lapply(arch.order, fit_arch, series = rBAYN)
arch.listBNP <- lapply(arch.order, fit_arch, series = rBNP)
names(arch.listASML) <- arch.names

info.matASML <- sapply(arch.listASML, infocriteria)
rownames(info.matASML) <- rownames(infocriteria(arch.listASML[[1]]))
info.matASML
##                arch.1   arch.2   arch.3   arch.4   arch.5
## Akaike       4.319460 4.311289 4.306464 4.289022 4.283014
## Bayes        4.325742 4.319664 4.316933 4.301585 4.297671
## Shibata      4.319458 4.311285 4.306458 4.289013 4.283002
## Hannan-Quinn 4.321726 4.314309 4.310240 4.293553 4.288300
names(arch.listBAYN) <- arch.names

info.matBAYN <- sapply(arch.listBAYN, infocriteria)
rownames(info.matBAYN) <- rownames(infocriteria(arch.listBAYN[[1]]))
info.matBAYN
##                arch.1   arch.2   arch.3   arch.4   arch.5
## Akaike       4.001734 3.984056 3.972407 4.622346 4.444896
## Bayes        4.008015 3.992431 3.982876 4.634908 4.459553
## Shibata      4.001732 3.984052 3.972401 4.622337 4.444884
## Hannan-Quinn 4.004000 3.987077 3.976183 4.626877 4.450183
names(arch.listBNP) <- arch.names

info.matBNP <- sapply(arch.listBNP, infocriteria)
rownames(info.matBNP) <- rownames(infocriteria(arch.listBNP[[1]]))
info.matBNP 
##                arch.1   arch.2   arch.3   arch.4   arch.5
## Akaike       4.574924 4.452698 4.407020 4.349775 4.322776
## Bayes        4.581205 4.461073 4.417489 4.362337 4.337432
## Shibata      4.574922 4.452694 4.407014 4.349766 4.322764
## Hannan-Quinn 4.577190 4.455719 4.410796 4.354306 4.328062

Los criterios de información para: * ASML: mejoran al aumentar el orden del ARCH * BAYN: señalan como el mejor modelo el ARCH(3) * BNP: mejoran al aumentar el orden del ARCH

Ahora vamos a probar a ejecutar el modelo para cada activo:

3.1 ASML

Vamos a comparar los resultados de GARCH(1,1) con ARCH(5)

garch11.fitASML <- ugarchfit(spec = garch11.spec, data=rASML)
garch11.fitASML
## 
## *---------------------------------*
## *          GARCH Model Fit        *
## *---------------------------------*
## 
## Conditional Variance Dynamics    
## -----------------------------------
## GARCH Model  : sGARCH(1,1)
## Mean Model   : ARFIMA(0,0,0)
## Distribution : norm 
## 
## Optimal Parameters
## ------------------------------------
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.096673    0.037287   2.5927 0.009523
## omega   0.052327    0.007774   6.7308 0.000000
## alpha1  0.017665    0.002015   8.7664 0.000000
## beta1   0.970493    0.002130 455.5579 0.000000
## 
## Robust Standard Errors:
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.096673    0.035350   2.7348 0.006242
## omega   0.052327    0.018122   2.8875 0.003883
## alpha1  0.017665    0.004289   4.1185 0.000038
## beta1   0.970493    0.001249 777.2193 0.000000
## 
## LogLikelihood : -6069.38 
## 
## Information Criteria
## ------------------------------------
##                    
## Akaike       4.2725
## Bayes        4.2809
## Shibata      4.2725
## Hannan-Quinn 4.2755
## 
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                      1.350  0.2452
## Lag[2*(p+q)+(p+q)-1][2]     1.778  0.3025
## Lag[4*(p+q)+(p+q)-1][5]     4.240  0.2256
## d.o.f=0
## H0 : No serial correlation
## 
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                    0.04575  0.8306
## Lag[2*(p+q)+(p+q)-1][5]   0.11204  0.9976
## Lag[4*(p+q)+(p+q)-1][9]   0.34787  0.9995
## d.o.f=2
## 
## Weighted ARCH LM Tests
## ------------------------------------
##             Statistic Shape Scale P-Value
## ARCH Lag[3]  0.003163 0.500 2.000  0.9551
## ARCH Lag[5]  0.073108 1.440 1.667  0.9915
## ARCH Lag[7]  0.230110 2.315 1.543  0.9960
## 
## Nyblom stability test
## ------------------------------------
## Joint Statistic:  0.5661
## Individual Statistics:              
## mu     0.03133
## omega  0.12554
## alpha1 0.16117
## beta1  0.16596
## 
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic:          1.07 1.24 1.6
## Individual Statistic:     0.35 0.47 0.75
## 
## Sign Bias Test
## ------------------------------------
##                    t-value   prob sig
## Sign Bias           0.3693 0.7120    
## Negative Sign Bias  0.5875 0.5569    
## Positive Sign Bias  1.2296 0.2190    
## Joint Effect        1.8689 0.6001    
## 
## 
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
##   group statistic p-value(g-1)
## 1    20     116.1    5.938e-16
## 2    30     132.6    3.371e-15
## 3    40     136.3    1.036e-12
## 4    50     161.4    6.426e-14
## 
## 
## Elapsed time : 0.249001

Vamos a probar la estimación rolling, en la que utilizaremos una ventana inicial de 120 días con reajuste diario.

# Rolling estimation of the GARCH(1,1): initial window of 120 observations,
# refit every day on a moving window, computing 1% and 5% VaR, parallelized
# over 10 PSOCK workers.
cl = makePSOCKcluster(10)
rollASML = ugarchroll(garch11.spec, rASML, n.start = 120, refit.every = 1,
refit.window = "moving", solver = "hybrid", calculate.VaR = TRUE,
VaR.alpha = c(0.01, 0.05), cluster = cl, keep.coef = TRUE)
# Fix: release the worker connections. Clusters were never stopped in the
# original, leaking connections (see the "cerrando la conexion" warnings
# emitted later in this report when R garbage-collected them).
stopCluster(cl)

Veamos los resultados al 1%:

report(rollASML, type="VaR", VaR.alpha = 0.01, conf.level= 0.99)
## VaR Backtest Report
## ===========================================
## Model:               sGARCH-norm
## Backtest Length: 2723
## Data:                
## 
## ==========================================
## alpha:               1%
## Expected Exceed: 27.2
## Actual VaR Exceed:   48
## Actual %:            1.8%
## 
## Unconditional Coverage (Kupiec)
## Null-Hypothesis: Correct Exceedances
## LR.uc Statistic: 13.041
## LR.uc Critical:      6.635
## LR.uc p-value:       0
## Reject Null:     YES
## 
## Conditional Coverage (Christoffersen)
## Null-Hypothesis: Correct Exceedances and
##                  Independence of Failures
## LR.cc Statistic: 13.068
## LR.cc Critical:      9.21
## LR.cc p-value:       0.001
## Reject Null:     YES

Veamos los resultados al 5%:

report(rollASML, type="VaR", VaR.alpha = 0.05, conf.level= 0.95)
## VaR Backtest Report
## ===========================================
## Model:               sGARCH-norm
## Backtest Length: 2723
## Data:                
## 
## ==========================================
## alpha:               5%
## Expected Exceed: 136.2
## Actual VaR Exceed:   128
## Actual %:            4.7%
## 
## Unconditional Coverage (Kupiec)
## Null-Hypothesis: Correct Exceedances
## LR.uc Statistic: 0.524
## LR.uc Critical:      3.841
## LR.uc p-value:       0.469
## Reject Null:     NO
## 
## Conditional Coverage (Christoffersen)
## Null-Hypothesis: Correct Exceedances and
##                  Independence of Failures
## LR.cc Statistic: 1.18
## LR.cc Critical:      5.991
## LR.cc p-value:       0.554
## Reject Null:     NO

Veamos algunos gráficos:

# Diagnostic plots for the ASML GARCH(1,1) fit. Per the rugarch plot method,
# which = 9 is the QQ-plot of standardized residuals, 10/11 the ACF of the
# (squared) standardized residuals, and 12 the news-impact curve.
plot(garch11.fitASML, which=9)

plot(garch11.fitASML, which=10)

plot(garch11.fitASML, which=11)

plot(garch11.fitASML, which=12)

Comparación entre GARCH(1,1) y ARCH(5):

# Compare conditional volatility paths: GARCH(1,1) vs ARCH(5), stacked.
par(mfrow=c(2,1))
plot.ts(sigma(garch11.fitASML), main="GARCH(1,1) conditional vol",
        ylab="vol", col="blue")
plot.ts(sigma(arch.listASML$arch.5), main="ARCH(5) conditional vol",
        ylab="vol", col="blue")

# Restore the default single-panel layout.
par(mfrow=c(1,1))

En este caso podríamos elegir el GARCH(1,1) ya que sus criterios de información son mejores. Además, tiene menor número de parámetros lo que hace que el modelo sea más fiable a la hora de la explicación de los parámetros

3.2 BAYN

garch11.fitBAYN <- ugarchfit(spec = garch11.spec, data=rBAYN)
garch11.fitBAYN
## 
## *---------------------------------*
## *          GARCH Model Fit        *
## *---------------------------------*
## 
## Conditional Variance Dynamics    
## -----------------------------------
## GARCH Model  : sGARCH(1,1)
## Mean Model   : ARFIMA(0,0,0)
## Distribution : norm 
## 
## Optimal Parameters
## ------------------------------------
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.088435    0.029618   2.9858 0.002828
## omega   0.024979    0.006374   3.9191 0.000089
## alpha1  0.035715    0.002839  12.5813 0.000000
## beta1   0.957694    0.002595 369.1005 0.000000
## 
## Robust Standard Errors:
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.088435    0.026485   3.3390 0.000841
## omega   0.024979    0.014624   1.7081 0.087616
## alpha1  0.035715    0.005905   6.0481 0.000000
## beta1   0.957694    0.005268 181.7864 0.000000
## 
## LogLikelihood : -5539.376 
## 
## Information Criteria
## ------------------------------------
##                    
## Akaike       3.8997
## Bayes        3.9080
## Shibata      3.8997
## Hannan-Quinn 3.9027
## 
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
##                         statistic  p-value
## Lag[1]                      9.846 0.001702
## Lag[2*(p+q)+(p+q)-1][2]     9.928 0.001996
## Lag[4*(p+q)+(p+q)-1][5]    10.885 0.005444
## d.o.f=0
## H0 : No serial correlation
## 
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
##                         statistic   p-value
## Lag[1]                      14.34 0.0001525
## Lag[2*(p+q)+(p+q)-1][5]     14.50 0.0006056
## Lag[4*(p+q)+(p+q)-1][9]     15.12 0.0033441
## d.o.f=2
## 
## Weighted ARCH LM Tests
## ------------------------------------
##             Statistic Shape Scale P-Value
## ARCH Lag[3]   0.06423 0.500 2.000  0.7999
## ARCH Lag[5]   0.17083 1.440 1.667  0.9722
## ARCH Lag[7]   0.86167 2.315 1.543  0.9350
## 
## Nyblom stability test
## ------------------------------------
## Joint Statistic:  0.3533
## Individual Statistics:              
## mu     0.04363
## omega  0.12801
## alpha1 0.11550
## beta1  0.09208
## 
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic:          1.07 1.24 1.6
## Individual Statistic:     0.35 0.47 0.75
## 
## Sign Bias Test
## ------------------------------------
##                    t-value   prob sig
## Sign Bias           1.5320 0.1256    
## Negative Sign Bias  0.6513 0.5149    
## Positive Sign Bias  1.1752 0.2400    
## Joint Effect        4.9179 0.1779    
## 
## 
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
##   group statistic p-value(g-1)
## 1    20     137.8    4.711e-20
## 2    30     163.7    9.736e-21
## 3    40     184.4    9.062e-21
## 4    50     180.6    5.926e-17
## 
## 
## Elapsed time : 0.2017839

Vamos a probar la estimación rolling, en la que utilizaremos una ventana inicial de 120 días con reajuste diario.

# Rolling GARCH(1,1) estimation for BAYN: 120-observation initial window,
# daily refit on a moving window, 1% and 5% VaR, 10 parallel workers.
cl = makePSOCKcluster(10)
rollBAYN = ugarchroll(garch11.spec, rBAYN, n.start = 120, refit.every = 1,
refit.window = "moving", solver = "hybrid", calculate.VaR = TRUE,
VaR.alpha = c(0.01, 0.05), cluster = cl, keep.coef = TRUE)
# Fix: stop the cluster when done. The "cerrando la conenexion ... que no
# esta siendo utilizada" warnings that originally appeared at this point
# were R garbage-collecting the worker connections of the previous,
# never-stopped cluster.
stopCluster(cl)

Veamos los resultados al 1%:

report(rollBAYN, type="VaR", VaR.alpha = 0.01, conf.level= 0.99)
## VaR Backtest Report
## ===========================================
## Model:               sGARCH-norm
## Backtest Length: 2723
## Data:                
## 
## ==========================================
## alpha:               1%
## Expected Exceed: 27.2
## Actual VaR Exceed:   53
## Actual %:            1.9%
## 
## Unconditional Coverage (Kupiec)
## Null-Hypothesis: Correct Exceedances
## LR.uc Statistic: 19.3
## LR.uc Critical:      6.635
## LR.uc p-value:       0
## Reject Null:     YES
## 
## Conditional Coverage (Christoffersen)
## Null-Hypothesis: Correct Exceedances and
##                  Independence of Failures
## LR.cc Statistic: 19.301
## LR.cc Critical:      9.21
## LR.cc p-value:       0
## Reject Null:     YES

Veamos los resultados al 5%:

report(rollBAYN, type="VaR", VaR.alpha = 0.05, conf.level= 0.95)
## VaR Backtest Report
## ===========================================
## Model:               sGARCH-norm
## Backtest Length: 2723
## Data:                
## 
## ==========================================
## alpha:               5%
## Expected Exceed: 136.2
## Actual VaR Exceed:   155
## Actual %:            5.7%
## 
## Unconditional Coverage (Kupiec)
## Null-Hypothesis: Correct Exceedances
## LR.uc Statistic: 2.635
## LR.uc Critical:      3.841
## LR.uc p-value:       0.105
## Reject Null:     NO
## 
## Conditional Coverage (Christoffersen)
## Null-Hypothesis: Correct Exceedances and
##                  Independence of Failures
## LR.cc Statistic: 3.802
## LR.cc Critical:      5.991
## LR.cc p-value:       0.149
## Reject Null:     NO

Veamos algunos gráficos:

# Diagnostic plots for the BAYN GARCH(1,1) fit (which = 9 QQ-plot,
# 10/11 ACF of (squared) standardized residuals, 12 news-impact curve).
# Fix: this section analyses Bayer, but the original plotted
# garch11.fitASML (copy-paste error); use garch11.fitBAYN.
plot(garch11.fitBAYN, which=9)

plot(garch11.fitBAYN, which=10)

plot(garch11.fitBAYN, which=11)

plot(garch11.fitBAYN, which=12)

Comparación entre GARCH(1,1) y ARCH(3):

# Compare conditional volatility paths: GARCH(1,1) vs ARCH(3) (the ARCH
# order preferred by the information criteria for BAYN), stacked.
par(mfrow=c(2,1))
plot.ts(sigma(garch11.fitBAYN), main="GARCH(1,1) conditional vol",
        ylab="vol", col="blue")
plot.ts(sigma(arch.listBAYN$arch.3), main="ARCH(3) conditional vol",
        ylab="vol", col="blue")

# Restore the default single-panel layout.
par(mfrow=c(1,1))

En este caso al igual que en el anterior los criterios de información señalan el modelo GARCH(1,1) como mejor.

3.3 BNP

garch11.fitBNP <- ugarchfit(spec = garch11.spec, data=rBNP)
garch11.fitBNP
## 
## *---------------------------------*
## *          GARCH Model Fit        *
## *---------------------------------*
## 
## Conditional Variance Dynamics    
## -----------------------------------
## GARCH Model  : sGARCH(1,1)
## Mean Model   : ARFIMA(0,0,0)
## Distribution : norm 
## 
## Optimal Parameters
## ------------------------------------
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.065288    0.031787   2.0539 0.039987
## omega   0.054400    0.015927   3.4155 0.000637
## alpha1  0.101417    0.012365   8.2017 0.000000
## beta1   0.893822    0.012696  70.4009 0.000000
## 
## Robust Standard Errors:
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.065288    0.029197   2.2361 0.025346
## omega   0.054400    0.020847   2.6095 0.009069
## alpha1  0.101417    0.022644   4.4787 0.000008
## beta1   0.893822    0.021426  41.7158 0.000000
## 
## LogLikelihood : -6071.469 
## 
## Information Criteria
## ------------------------------------
##                    
## Akaike       4.2740
## Bayes        4.2824
## Shibata      4.2740
## Hannan-Quinn 4.2770
## 
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                    0.06101 0.80490
## Lag[2*(p+q)+(p+q)-1][2]   1.04156 0.48495
## Lag[4*(p+q)+(p+q)-1][5]   7.32460 0.04323
## d.o.f=0
## H0 : No serial correlation
## 
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                     0.1005  0.7512
## Lag[2*(p+q)+(p+q)-1][5]    0.6038  0.9405
## Lag[4*(p+q)+(p+q)-1][9]    1.6258  0.9447
## d.o.f=2
## 
## Weighted ARCH LM Tests
## ------------------------------------
##             Statistic Shape Scale P-Value
## ARCH Lag[3]    0.1223 0.500 2.000  0.7265
## ARCH Lag[5]    0.9359 1.440 1.667  0.7521
## ARCH Lag[7]    1.7461 2.315 1.543  0.7708
## 
## Nyblom stability test
## ------------------------------------
## Joint Statistic:  1.0693
## Individual Statistics:              
## mu     0.05103
## omega  0.63217
## alpha1 0.15334
## beta1  0.23540
## 
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic:          1.07 1.24 1.6
## Individual Statistic:     0.35 0.47 0.75
## 
## Sign Bias Test
## ------------------------------------
##                    t-value   prob sig
## Sign Bias           0.1285 0.8978    
## Negative Sign Bias  0.8903 0.3734    
## Positive Sign Bias  1.0459 0.2957    
## Joint Effect        3.0170 0.3890    
## 
## 
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
##   group statistic p-value(g-1)
## 1    20     46.45    4.282e-04
## 2    30     68.71    4.544e-05
## 3    40     72.33    9.322e-04
## 4    50     91.77    2.069e-04
## 
## 
## Elapsed time : 0.1530271

Vamos a probar la estimación rolling, en la que utilizaremos una ventana inicial de 120 días con reajuste diario.

# Rolling GARCH(1,1) estimation for BNP: 120-observation initial window,
# daily refit on a moving window, 1% and 5% VaR, 10 parallel workers.
cl = makePSOCKcluster(10)
rollBNP = ugarchroll(garch11.spec, rBNP, n.start = 120, refit.every = 1,
refit.window = "moving", solver = "hybrid", calculate.VaR = TRUE,
VaR.alpha = c(0.01, 0.05), cluster = cl, keep.coef = TRUE)
# Fix: release the worker connections (the original never stopped any of
# its clusters, leaking 10 connections per section).
stopCluster(cl)

Veamos los resultados al 1%:

report(rollBNP, type="VaR", VaR.alpha = 0.01, conf.level= 0.99)
## VaR Backtest Report
## ===========================================
## Model:               sGARCH-norm
## Backtest Length: 2723
## Data:                
## 
## ==========================================
## alpha:               1%
## Expected Exceed: 27.2
## Actual VaR Exceed:   57
## Actual %:            2.1%
## 
## Unconditional Coverage (Kupiec)
## Null-Hypothesis: Correct Exceedances
## LR.uc Statistic: 25.005
## LR.uc Critical:      6.635
## LR.uc p-value:       0
## Reject Null:     YES
## 
## Conditional Coverage (Christoffersen)
## Null-Hypothesis: Correct Exceedances and
##                  Independence of Failures
## LR.cc Statistic: 25.481
## LR.cc Critical:      9.21
## LR.cc p-value:       0
## Reject Null:     YES

Veamos los resultados al 5%:

report(rollBNP, type="VaR", VaR.alpha = 0.05, conf.level= 0.95)
## VaR Backtest Report
## ===========================================
## Model:               sGARCH-norm
## Backtest Length: 2723
## Data:                
## 
## ==========================================
## alpha:               5%
## Expected Exceed: 136.2
## Actual VaR Exceed:   158
## Actual %:            5.8%
## 
## Unconditional Coverage (Kupiec)
## Null-Hypothesis: Correct Exceedances
## LR.uc Statistic: 3.518
## LR.uc Critical:      3.841
## LR.uc p-value:       0.061
## Reject Null:     NO
## 
## Conditional Coverage (Christoffersen)
## Null-Hypothesis: Correct Exceedances and
##                  Independence of Failures
## LR.cc Statistic: 3.907
## LR.cc Critical:      5.991
## LR.cc p-value:       0.142
## Reject Null:     NO

Veamos algunos gráficos:

# Diagnostic plots for the BNP GARCH(1,1) fit (which = 9 QQ-plot,
# 10/11 ACF of (squared) standardized residuals, 12 news-impact curve).
# Fix: this section analyses BNP Paribas, but the original plotted
# garch11.fitASML (copy-paste error); use garch11.fitBNP.
plot(garch11.fitBNP, which=9)

plot(garch11.fitBNP, which=10)

plot(garch11.fitBNP, which=11)

plot(garch11.fitBNP, which=12)

Vamos a comparar ARCH(5) con GARCH(1,1)

# Compare conditional volatility paths: GARCH(1,1) vs ARCH(5), stacked.
par(mfrow=c(2,1))
plot.ts(sigma(garch11.fitBNP), main="GARCH(1,1) conditional vol",
        ylab="vol", col="blue")
plot.ts(sigma(arch.listBNP$arch.5), main="ARCH(5) conditional vol",
        ylab="vol", col="blue")

# Restore the default single-panel layout.
par(mfrow=c(1,1))

En este caso, al igual que en los anteriores, el modelo GARCH(1,1) tiene mejores criterios de información, pero hay que fijarse en que la volatilidad condicional se parece bastante entre los dos modelos.

4 Apartado 2: Ajuste de los modelos CAPM para los activos

En este apartado vamos a ajustar tres modelos distintos de CAPM para cada activos. Son los siguientes modelos:

  1. \((r - R_F) = \beta (R_M - R_F) + \varepsilon\)

  2. \((r - R_F) = \alpha + \beta (R_M - R_F) + \varepsilon\)

  3. \(r = \alpha + \beta R_M + \varepsilon\)

Vamos a calcular los tipos reales de las rentabilidades y de mercado:

# Excess returns: each asset's return minus the daily risk-free rate (DTB3),
# and the market (EUROSTOXX50) excess return.
EX.ASML <- rASML-DTB3
EX.BAYN <- rBAYN-DTB3
EX.BNP <- rBNP-DTB3
EX.MKT <- rESX-DTB3

4.1 Modelos CAPM para ASML

Vamos a proceder al cálculo de los modelos para ASML.

4.1.1 Modelo 1

# CAPM model 1: asset excess return regressed on the market excess return
# with no intercept (the "- 1" drops alpha; beta-only regression).
m1.ASML <- lm(EX.ASML ~  EX.MKT - 1)
sm1.ASML <- summary(m1.ASML)
print(sm1.ASML)
## 
## Call:
## lm(formula = EX.ASML ~ EX.MKT - 1)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -22.8700  -0.8441   0.0230   0.9380  20.4790 
## 
## Coefficients:
##        Estimate Std. Error t value Pr(>|t|)    
## EX.MKT   0.7871     0.0236   33.35   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.79 on 2842 degrees of freedom
## Multiple R-squared:  0.2813, Adjusted R-squared:  0.2811 
## F-statistic:  1112 on 1 and 2842 DF,  p-value: < 2.2e-16
print(m1.ASML$coefficients)
##    EX.MKT 
## 0.7871427
# Model 1 has a single coefficient: the market beta.
m1.ASML.coef <- m1.ASML$coefficients
beta1.ASML <- m1.ASML.coef

cat("Beta ", beta1.ASML, "\n")
## Beta  0.7871427

Comprobamos los residuos del modelo:

# Residual diagnostics for CAPM1: ACF of the residuals and of their squares.
uhat1.ASML <- m1.ASML$residuals
uhat1.ASML <- as.xts(uhat1.ASML)
ggAcf(uhat1.ASML) + labs(title="Residuos CAPM1")

uhat21.ASML <- uhat1.ASML^2
ggAcf(uhat21.ASML) + labs(title="Residuos al cuadrado de CAPM1")

# Ljung-Box on squared residuals: checks for ARCH-type dependence.
Box.test(uhat21.ASML, lag=12, type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat21.ASML
## X-squared = 3.3037, df = 12, p-value = 0.993
ArchTest(uhat1.ASML)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat1.ASML
## Chi-squared = 3.1623, df = 12, p-value = 0.9943

4.1.2 Modelo 2

m2.ASML <- lm(EX.ASML ~  EX.MKT)
sm2.ASML <- summary(m2.ASML)
print(sm2.ASML)
## 
## Call:
## lm(formula = EX.ASML ~ EX.MKT)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -22.9354  -0.9098  -0.0434   0.8718  20.4130 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  0.06569    0.03356   1.957   0.0504 .  
## EX.MKT       0.78770    0.02359  33.392   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.789 on 2841 degrees of freedom
## Multiple R-squared:  0.2819, Adjusted R-squared:  0.2816 
## F-statistic:  1115 on 1 and 2841 DF,  p-value: < 2.2e-16
print(m2.ASML$coefficients)
## (Intercept)      EX.MKT 
##  0.06568963  0.78770490
m2.ASML.coef <- m2.ASML$coefficients
alpha2.ASML <- m2.ASML.coef[1]
beta2.ASML <- m2.ASML.coef[2]
cat("Alpha ", alpha2.ASML, "\n")
## Alpha  0.06568963
cat("Beta ", beta2.ASML, "\n")
## Beta  0.7877049

Comprobamos los residuos del modelo:

uhat2.ASML <- m2.ASML$residuals
uhat2.ASML <- as.xts(uhat2.ASML)
ggAcf(uhat2.ASML) + labs(title="Residuos CAPM2")

uhat22.ASML <- uhat2.ASML^2
ggAcf(uhat22.ASML) + labs(title="Residuos al cuadrado de CAPM2")

Box.test(uhat22.ASML, lag=12, type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat22.ASML
## X-squared = 3.3444, df = 12, p-value = 0.9926
ArchTest(uhat2.ASML)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat2.ASML
## Chi-squared = 3.2003, df = 12, p-value = 0.994

4.1.3 Modelo 3

m3.ASML <- lm(rASML ~ rESX)
sm3.ASML<- summary(m3.ASML)
print(sm3.ASML)
## 
## Call:
## lm(formula = rASML ~ rESX)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -22.9363  -0.9094  -0.0435   0.8729  20.4135 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  0.06670    0.03356   1.988    0.047 *  
## rESX         0.78768    0.02359  33.393   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.789 on 2841 degrees of freedom
## Multiple R-squared:  0.2819, Adjusted R-squared:  0.2816 
## F-statistic:  1115 on 1 and 2841 DF,  p-value: < 2.2e-16
m3.ASML.coef <- m3.ASML$coefficients
alpha3.ASML <- m3.ASML.coef[1]
beta3.ASML <- m3.ASML.coef[2]
cat("Alpha ",alpha3.ASML, "\n")
## Alpha  0.06670355
cat("Beta ", beta3.ASML, "\n")
## Beta  0.7876765

Comprobamos los residuos del modelo:

uhat3.ASML <- m3.ASML$residuals
uhat3.ASML <- as.xts(uhat3.ASML)
ggAcf(uhat3.ASML) + labs(title="Residuos de  CAPM3")

uhat23.ASML <- uhat3.ASML^2
ggAcf(uhat23.ASML) + labs(title="Residuos al cuadrado de CAPM3")

Box.test(uhat23.ASML, lag=12, type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat23.ASML
## X-squared = 3.3437, df = 12, p-value = 0.9926
ArchTest(uhat3.ASML)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat3.ASML
## Chi-squared = 3.1998, df = 12, p-value = 0.994

Veamos los criterios de información para ASML

# Compare the three CAPM specifications for ASML by information criteria.
AIC.ASML <- AIC(m1.ASML,m2.ASML,m3.ASML)
BIC.ASML <- BIC(m1.ASML,m2.ASML,m3.ASML)

info.capm.ASML <- cbind(AIC.ASML,BIC.ASML)
# Drop the duplicated degrees-of-freedom columns (columns 1 and 3).
info.capm.ASML <- info.capm.ASML[,-c(1,3)]
rownames(info.capm.ASML) <- c("CAPM1", "CAPM2", "CAPM3")
colnames(info.capm.ASML) <- c("Akaike", "Schwarz")
info.capm.ASML
##         Akaike  Schwarz
## CAPM1 11382.32 11394.22
## CAPM2 11380.49 11398.34
## CAPM3 11380.46 11398.31

Dados los criterios de información y que el parámetro alpha es significativo se puede seleccionar el modelo CAPM3

4.2 Modelos CAPM para BAYN

Vamos a proceder al cálculo de los modelos para BAYN.

4.2.1 Modelo 1

# CAPM1 for BAYN: excess return on market excess return, no intercept ("- 1").
m1.BAYN <- lm(EX.BAYN ~  EX.MKT - 1)
sm1.BAYN <- summary(m1.BAYN)
print(sm1.BAYN)
## 
## Call:
## lm(formula = EX.BAYN ~ EX.MKT - 1)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -14.1472  -0.6158   0.0166   0.6725  25.8410 
## 
## Coefficients:
##        Estimate Std. Error t value Pr(>|t|)    
## EX.MKT  0.85975    0.01798   47.83   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.364 on 2842 degrees of freedom
## Multiple R-squared:  0.446,  Adjusted R-squared:  0.4458 
## F-statistic:  2288 on 1 and 2842 DF,  p-value: < 2.2e-16
# CAPM1 has no intercept, so the single coefficient is the beta.
print(m1.BAYN$coefficients)
##    EX.MKT 
## 0.8597525
m1.BAYN.coef <- m1.BAYN$coefficients
beta1.BAYN <- m1.BAYN.coef

cat("Beta ", beta1.BAYN, "\n")
## Beta  0.8597525

Comprobamos los resíduos del modelo:

# Residual diagnostics for CAPM1 (BAYN): serial correlation (ACF) and
# ARCH effects (ACF of squared residuals, Ljung-Box, ARCH-LM).
uhat1.BAYN <- m1.BAYN$residuals
uhat1.BAYN <- as.xts(uhat1.BAYN)
ggAcf(uhat1.BAYN) + labs(title="Residuos CAPM1")

uhat21.BAYN <- uhat1.BAYN^2
ggAcf(uhat21.BAYN) + labs(title="Residuos al cuadrado de CAPM1")

Box.test(uhat21.BAYN, lag=12, type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat21.BAYN
## X-squared = 23.266, df = 12, p-value = 0.02555
ArchTest(uhat1.BAYN)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat1.BAYN
## Chi-squared = 22.77, df = 12, p-value = 0.02975

4.2.2 Modelo 2

# CAPM2 for BAYN: same regression as CAPM1 but with an intercept (alpha).
m2.BAYN <- lm(EX.BAYN ~  EX.MKT)
sm2.BAYN <- summary(m2.BAYN)
print(sm2.BAYN)
## 
## Call:
## lm(formula = EX.BAYN ~ EX.MKT)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -14.2051  -0.6733  -0.0408   0.6149  25.7838 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  0.05737    0.02556   2.245   0.0249 *  
## EX.MKT       0.86024    0.01796  47.886   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.363 on 2841 degrees of freedom
## Multiple R-squared:  0.4466, Adjusted R-squared:  0.4464 
## F-statistic:  2293 on 1 and 2841 DF,  p-value: < 2.2e-16
# Extract CAPM2 parameters: [1] = alpha (intercept), [2] = beta (market loading).
print(m2.BAYN$coefficients)
## (Intercept)      EX.MKT 
##  0.05737158  0.86024351
m2.BAYN.coef <- m2.BAYN$coefficients
alpha2.BAYN <- m2.BAYN.coef[1]
beta2.BAYN <- m2.BAYN.coef[2]
cat("Alpha ", alpha2.BAYN, "\n")
## Alpha  0.05737158
cat("Beta ", beta2.BAYN, "\n")
## Beta  0.8602435

Comprobamos los resíduos del modelo:

# Residual diagnostics for CAPM2 (BAYN); both Ljung-Box on squared residuals
# and ARCH-LM reject at 5%, indicating conditional heteroskedasticity.
uhat2.BAYN <- m2.BAYN$residuals
uhat2.BAYN <- as.xts(uhat2.BAYN)
ggAcf(uhat2.BAYN) + labs(title="Residuos CAPM2")

uhat22.BAYN <- uhat2.BAYN^2
ggAcf(uhat22.BAYN) + labs(title="Residuos al cuadrado de CAPM2")

Box.test(uhat22.BAYN, lag=12, type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat22.BAYN
## X-squared = 23.817, df = 12, p-value = 0.02154
ArchTest(uhat2.BAYN)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat2.BAYN
## Chi-squared = 23.286, df = 12, p-value = 0.02539

4.2.3 Modelo 3

# CAPM3 for BAYN: raw (not excess) returns with an intercept.
m3.BAYN <- lm(rBAYN ~ rESX)
sm3.BAYN<- summary(m3.BAYN)
print(sm3.BAYN)
## 
## Call:
## lm(formula = rBAYN ~ rESX)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -14.2057  -0.6724  -0.0414   0.6143  25.7833 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  0.05804    0.02556   2.271   0.0232 *  
## rESX         0.86030    0.01796  47.891   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.363 on 2841 degrees of freedom
## Multiple R-squared:  0.4467, Adjusted R-squared:  0.4465 
## F-statistic:  2294 on 1 and 2841 DF,  p-value: < 2.2e-16
# Extract CAPM3 parameters: [1] = alpha, [2] = beta.
m3.BAYN.coef <- m3.BAYN$coefficients
alpha3.BAYN <- m3.BAYN.coef[1]
beta3.BAYN <- m3.BAYN.coef[2]
cat("Alpha ",alpha3.BAYN, "\n")
## Alpha  0.05803995
cat("Beta ", beta3.BAYN, "\n")
## Beta  0.860296

Comprobamos los resíduos del modelo:

# Residual diagnostics for CAPM3 (BAYN): serial correlation and ARCH effects.
uhat3.BAYN <- m3.BAYN$residuals
uhat3.BAYN <- as.xts(uhat3.BAYN)
ggAcf(uhat3.BAYN) + labs(title="Residuos de  CAPM3")

uhat23.BAYN <- uhat3.BAYN^2
ggAcf(uhat23.BAYN) + labs(title="Residuos al cuadrado de CAPM3")

Box.test(uhat23.BAYN, lag=12, type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat23.BAYN
## X-squared = 23.823, df = 12, p-value = 0.0215
ArchTest(uhat3.BAYN)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat3.BAYN
## Chi-squared = 23.291, df = 12, p-value = 0.02535

Veamos los criterios de información para BAYN

# Information criteria for the three BAYN CAPM variants; drop the duplicated
# df columns (1 and 3) left by cbind(AIC, BIC).
AIC.BAYN <- AIC(m1.BAYN,m2.BAYN,m3.BAYN)
BIC.BAYN <- BIC(m1.BAYN,m2.BAYN,m3.BAYN)

info.capm.BAYN <- cbind(AIC.BAYN,BIC.BAYN)
info.capm.BAYN <- info.capm.BAYN[,-c(1,3)]
rownames(info.capm.BAYN) <- c("CAPM1", "CAPM2", "CAPM3")
colnames(info.capm.BAYN) <- c("Akaike", "Schwarz")
info.capm.BAYN
##         Akaike  Schwarz
## CAPM1 9834.585 9846.490
## CAPM2 9831.547 9849.405
## CAPM3 9831.586 9849.444

En este caso el parámetro alpha también es significativo y nos fijaremos en el criterio de Akaike; por tanto, podemos seleccionar el modelo CAPM2.

4.3 Modelos CAPM para BNP

Vamos a proceder al cálculo de los modelos para BNP.

4.3.1 Modelo 1

# CAPM1 for BNP: excess return on market excess return, no intercept ("- 1").
m1.BNP <- lm(EX.BNP ~  EX.MKT - 1)
sm1.BNP <- summary(m1.BNP)
print(sm1.BNP)
## 
## Call:
## lm(formula = EX.BNP ~ EX.MKT - 1)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -18.5833  -0.6100  -0.0065   0.6661  12.7391 
## 
## Coefficients:
##        Estimate Std. Error t value Pr(>|t|)    
## EX.MKT  1.42108    0.02069    68.7   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.569 on 2842 degrees of freedom
## Multiple R-squared:  0.6241, Adjusted R-squared:  0.624 
## F-statistic:  4719 on 1 and 2842 DF,  p-value: < 2.2e-16
# CAPM1 has no intercept, so the single coefficient is the beta.
print(m1.BNP$coefficients)
##   EX.MKT 
## 1.421083
m1.BNP.coef <- m1.BNP$coefficients
beta1.BNP <- m1.BNP.coef

cat("Beta ", beta1.BNP, "\n")
## Beta  1.421083

Comprobamos los resíduos del modelo:

# Residuals of CAPM1 (BNP), converted to xts for the ACF plot.
uhat1.BNP <- m1.BNP$residuals
uhat1.BNP <- as.xts(uhat1.BNP)
ggAcf(uhat1.BNP) + labs(title="Residuos CAPM1")
## Warning: cerrando la conenexion 24 (<-Wolfshund:11969) que no esta siendo
## utilizada
## Warning: cerrando la conenexion 23 (<-Wolfshund:11969) que no esta siendo
## utilizada
## Warning: cerrando la conenexion 22 (<-Wolfshund:11969) que no esta siendo
## utilizada
## Warning: cerrando la conenexion 21 (<-Wolfshund:11969) que no esta siendo
## utilizada
## Warning: cerrando la conenexion 20 (<-Wolfshund:11969) que no esta siendo
## utilizada
## Warning: cerrando la conenexion 19 (<-Wolfshund:11969) que no esta siendo
## utilizada
## Warning: cerrando la conenexion 18 (<-Wolfshund:11969) que no esta siendo
## utilizada
## Warning: cerrando la conenexion 17 (<-Wolfshund:11969) que no esta siendo
## utilizada
## Warning: cerrando la conenexion 16 (<-Wolfshund:11969) que no esta siendo
## utilizada
## Warning: cerrando la conenexion 15 (<-Wolfshund:11969) que no esta siendo
## utilizada

# ARCH-effect diagnostics on squared residuals; both tests strongly reject,
# so BNP's CAPM residuals show clear conditional heteroskedasticity.
uhat21.BNP <- uhat1.BNP^2
ggAcf(uhat21.BNP) + labs(title="Residuos al cuadrado de CAPM1")

Box.test(uhat21.BNP, lag=12, type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat21.BNP
## X-squared = 850.29, df = 12, p-value < 2.2e-16
ArchTest(uhat1.BNP)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat1.BNP
## Chi-squared = 386.47, df = 12, p-value < 2.2e-16

4.3.2 Modelo 2

# CAPM2 for BNP: same regression as CAPM1 but with an intercept (alpha).
m2.BNP <- lm(EX.BNP ~  EX.MKT)
sm2.BNP <- summary(m2.BNP)
print(sm2.BNP)
## 
## Call:
## lm(formula = EX.BNP ~ EX.MKT)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -18.6142  -0.6410  -0.0377   0.6354  12.7070 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  0.03096    0.02943   1.052    0.293    
## EX.MKT       1.42135    0.02069  68.707   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.569 on 2841 degrees of freedom
## Multiple R-squared:  0.6243, Adjusted R-squared:  0.6242 
## F-statistic:  4721 on 1 and 2841 DF,  p-value: < 2.2e-16
# Extract CAPM2 parameters: [1] = alpha (intercept), [2] = beta.
print(m2.BNP$coefficients)
## (Intercept)      EX.MKT 
##  0.03096202  1.42134778
m2.BNP.coef <- m2.BNP$coefficients
alpha2.BNP <- m2.BNP.coef[1]
beta2.BNP <- m2.BNP.coef[2]
cat("Alpha ", alpha2.BNP, "\n")
## Alpha  0.03096202
cat("Beta ", beta2.BNP, "\n")
## Beta  1.421348

Comprobamos los resíduos del modelo:

# Residual diagnostics for CAPM2 (BNP): serial correlation and ARCH effects.
uhat2.BNP <- m2.BNP$residuals
uhat2.BNP <- as.xts(uhat2.BNP)
ggAcf(uhat2.BNP) + labs(title="Residuos CAPM2")

uhat22.BNP <- uhat2.BNP^2
ggAcf(uhat22.BNP) + labs(title="Residuos al cuadrado de CAPM2")

Box.test(uhat22.BNP, lag=12, type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat22.BNP
## X-squared = 847.95, df = 12, p-value < 2.2e-16
ArchTest(uhat2.BNP)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat2.BNP
## Chi-squared = 386.48, df = 12, p-value < 2.2e-16

4.3.3 Modelo 3

# CAPM3 for BNP: raw (not excess) returns with an intercept.
m3.BNP <- lm(rBNP ~ rESX)
sm3.BNP<- summary(m3.BNP)
print(sm3.BNP)
## 
## Call:
## lm(formula = rBNP ~ rESX)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -18.6123  -0.6401  -0.0397   0.6359  12.7087 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  0.02895    0.02943   0.984    0.325    
## rESX         1.42133    0.02069  68.710   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.569 on 2841 degrees of freedom
## Multiple R-squared:  0.6243, Adjusted R-squared:  0.6242 
## F-statistic:  4721 on 1 and 2841 DF,  p-value: < 2.2e-16
# Extract CAPM3 parameters: [1] = alpha, [2] = beta.
m3.BNP.coef <- m3.BNP$coefficients
alpha3.BNP <- m3.BNP.coef[1]
beta3.BNP <- m3.BNP.coef[2]
cat("Alpha ",alpha3.BNP, "\n")
## Alpha  0.02894877
cat("Beta ", beta3.BNP, "\n")
## Beta  1.421333

Comprobamos los resíduos del modelo:

# Residual diagnostics for CAPM3 (BNP): serial correlation and ARCH effects.
uhat3.BNP <- m3.BNP$residuals
uhat3.BNP <- as.xts(uhat3.BNP)
ggAcf(uhat3.BNP) + labs(title="Residuos de  CAPM3")

uhat23.BNP <- uhat3.BNP^2
ggAcf(uhat23.BNP) + labs(title="Residuos al cuadrado de CAPM3")

Box.test(uhat23.BNP, lag=12, type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat23.BNP
## X-squared = 848.03, df = 12, p-value < 2.2e-16
ArchTest(uhat3.BNP)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat3.BNP
## Chi-squared = 386.47, df = 12, p-value < 2.2e-16

Veamos los criterios de información para BNP

# Information criteria for the three BNP CAPM variants; drop the duplicated
# df columns (1 and 3) left by cbind(AIC, BIC).
AIC.BNP <- AIC(m1.BNP,m2.BNP,m3.BNP)
BIC.BNP <- BIC(m1.BNP,m2.BNP,m3.BNP)

info.capm.BNP <- cbind(AIC.BNP,BIC.BNP)
info.capm.BNP <- info.capm.BNP[,-c(1,3)]
rownames(info.capm.BNP) <- c("CAPM1", "CAPM2", "CAPM3")
colnames(info.capm.BNP) <- c("Akaike", "Schwarz")
info.capm.BNP
##         Akaike  Schwarz
## CAPM1 10633.07 10644.97
## CAPM2 10633.96 10651.82
## CAPM3 10633.95 10651.80

En este caso el parámetro alpha no es significativo y los criterios de información señalan el modelo CAPM1, por lo que seleccionamos ese modelo.

4.4 Índices

En este apartado vamos a calcular tres índices: Sharpe, Treynor y Jensen, con el objetivo de ordenar los activos según su comportamiento.

4.4.1 índice de Sharpe

El Ratio de Sharpe se define como:

\[ Sharpe~Ratio = \frac{R_{Act}-R_{sin~riesgo}}{\sigma_{Act}}\]

Para simplificar suponemos que \(R_{sin~riesgo}=0\).

# Full-sample Sharpe ratios on the excess-return series (Rf assumed 0,
# as stated above); FUN = "StdDev" uses the standard deviation denominator.
sharpe.ratio.ASML <- SharpeRatio(EX.ASML, FUN = "StdDev")
sharpe.ratio.BAYN <- SharpeRatio(EX.BAYN, FUN = "StdDev")
sharpe.ratio.BNP <- SharpeRatio(EX.BNP, FUN = "StdDev")
sharpe.ratio.ASML
##                                     ASML
## StdDev Sharpe (Rf=0%, p=95%): 0.02465285
sharpe.ratio.BAYN
##                                     BAYN
## StdDev Sharpe (Rf=0%, p=95%): 0.02318896
sharpe.ratio.BNP
##                                       BNP
## StdDev Sharpe (Rf=0%, p=95%): 0.002477528

4.4.2 índice de Treynor

El Ratio de Treynor se define como:

\[ Treynor~Ratio = \frac{R_{Act}-R_{sin~riesgo}}{\beta_{Act}}\].

Para simplificar suponemos que \(R_{sin~riesgo}=0\)

# Treynor ratio: mean excess return divided by the CAPM2 beta
# (coeff[2] of the models with intercept fitted in section 4).
treynor.ratio.ASML <- mean(EX.ASML)/ m2.ASML$coeff[2]
treynor.ratio.BAYN <- mean(EX.BAYN)/ m2.BAYN$coeff[2]
treynor.ratio.BNP <- mean(EX.BNP)/ m2.BNP$coeff[2]

treynor<- cbind(treynor.ratio.ASML,treynor.ratio.BAYN,treynor.ratio.BNP)
colnames(treynor) <- c("ASML", "BAYN", "BNP")
rownames(treynor) <- c("Treynor Ratio")
treynor
##                     ASML       BAYN         BNP
## Treynor Ratio 0.06607171 0.04937026 0.004461577

4.4.3 índice de Jensen

El alpha de Jensen se define como:

\[ Alpha~de~Jensen = \alpha=(R_{Act}-R_{sin~riesgo})-(R_{Mkt}-R_{sin~riesgo})\beta_{Act}\] El alpha de Jensen equivale al valor de alpha que estimamos en nuestros modelos CAPM.

# Jensen's alpha equals the estimated intercept of the CAPM2 regressions.
jensen.ratio.ASML <- alpha2.ASML
jensen.ratio.BAYN <- alpha2.BAYN
jensen.ratio.BNP <- alpha2.BNP

jensen <- cbind(jensen.ratio.ASML, jensen.ratio.BAYN,jensen.ratio.BNP)
colnames(jensen) <- c("ASML", "BAYN", "BNP")
rownames(jensen) <- c("Alpha")
jensen
##             ASML       BAYN        BNP
## Alpha 0.06568963 0.05737158 0.03096202

4.5 Comportamiento de los activos

Los activos con \(\beta < 1\) se consideran defensivos, ya que tienen menor variabilidad que la del mercado. Los fondos con \(\beta = 1\) se consideran neutros, ya que sus rendimientos se comportan como el mercado. Los fondos con \(\beta > 1\) se consideran agresivos, ya que tienen mayor variabilidad que la del mercado.

veamos la tabla con las betas de los activos para ver su comportamiento de acuerdo al mercado:

# Summary table of the CAPM1 (no-intercept) betas for the three assets.
beta.activos <- cbind(beta1.ASML, beta1.BAYN, beta1.BNP)
colnames(beta.activos) <- c("ASML", "BAYN", "BNP")
rownames(beta.activos) <- c("Beta")
beta.activos
##           ASML      BAYN      BNP
## Beta 0.7871427 0.8597525 1.421083

Viendo cómo son los parámetros \(\beta\), ASML es el más defensivo, seguido de BAYN, que también lo es pero en menor medida. El activo BNP es el que se comporta de forma agresiva.

5 Apartado 3: los parámetros de los CAPM y los índices son constantes en el tiempo?

En este apartado vamos a probar si los distintos parámetros elegidos son constantes o no en el tiempo.

Para conseguirlo vamos a probar dos maneras de demostrarlo: 1. Recursive: vamos a calcular la estimación recursiva de los parámetros fijando el comienzo en 100 observaciones. 2. Rolling: vamos a cambiar el punto de vista fijando ventanas de 100 observaciones y moviéndolas día a día.

Este método lo aplicaremos a los CAPM2 para ver el comportamiento de los dos parámetros, alpha y beta, con el fin de tener una muestra homogénea.

5.1 ASML

Recursive:

# Recursive (expanding-window) CAPM estimation for ASML: start with the
# first k0+1 = 100 observations and add one observation per iteration;
# each estimate is dated at the last day of its window.
# NOTE(review): `T` shadows base R's TRUE shorthand, and the loop clobbers
# the global model object m2.ASML from section 4 — confirm nothing later
# relies on either.
ASML.matrix <- cbind(EX.ASML, EX.MKT)
ASML.matrix <- coredata(ASML.matrix)

ASML.X <- ASML.matrix[,2]
ASML.Y <- ASML.matrix[,1]


k0 <- 99
T <- length(ASML.X)
alpha.ASML <- beta.ASML <- array(dim=(T-k0))
for(i in 1:(T-k0))
{
  k <- k0 + i
  vX.ASML <- ASML.X[1:k]
  vY.ASML <- ASML.Y[1:k]
  m2.ASML <- lm(vY.ASML ~ vX.ASML)
  alpha.ASML[i] <-m2.ASML$coeff[1]
  beta.ASML[i] <- m2.ASML$coeff[2]
}

alpha.ASML <- xts(alpha.ASML, index(EX.ASML)[(k0+1):(T)])
beta.ASML <- xts(beta.ASML, index(EX.ASML)[(k0+1):(T)])
dygraph(alpha.ASML, main = "Recursive Alpha: ASML") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(beta.ASML, main = "Recursive Beta: ASML") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

Rolling:

# Rolling estimation for ASML: fixed window of N = 100 observations moved
# forward one day at a time, re-estimating the CAPM regression each step.
# T, k0, ASML.X and ASML.Y are reused from the recursive block above.
N <- 100
alpha.ASML <- beta.ASML <- array(dim=(T-N+1))
for(i in 1:(T-N+1))
{
  # BUG FIX: the original window i:(i+N) held N+1 observations and, on the
  # last iteration, read one element past the end of the series (an NA).
  # i:(i+N-1) is exactly N observations and always stays in range.
  vX.ASML <- ASML.X[i:(i+N-1)]
  vY.ASML <- ASML.Y[i:(i+N-1)]
  fit.roll.ASML <- lm(vY.ASML ~ vX.ASML)  # local name: do not clobber m2.ASML
  alpha.ASML[i] <- fit.roll.ASML$coeff[1]
  beta.ASML[i] <- fit.roll.ASML$coeff[2]
}

# Each estimate is dated at the last day of its window.
alpha.ASML <- xts(alpha.ASML, index(EX.ASML)[(k0+1):(T)])
beta.ASML <- xts(beta.ASML, index(EX.ASML)[(k0+1):(T)])
dygraph(alpha.ASML, main = "Rolling Alpha: ASML") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(beta.ASML, main = "Rolling Beta: ASML") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

5.2 BAYN

Recursive:

# Recursive (expanding-window) CAPM estimation for BAYN; same scheme as the
# ASML block: first window = 100 obs, grown by one obs per iteration.
# NOTE(review): the loop clobbers the global model object m2.BAYN.
BAYN.matrix <- cbind(EX.BAYN, EX.MKT)
BAYN.matrix <- coredata(BAYN.matrix)

BAYN.X <- BAYN.matrix[,2]
BAYN.Y <- BAYN.matrix[,1]


k0 <- 99
T <- length(BAYN.X)
alpha.BAYN <- beta.BAYN <- array(dim=(T-k0))
for(i in 1:(T-k0))
{
  k <- k0 + i
  vX.BAYN <- BAYN.X[1:k]
  vY.BAYN <- BAYN.Y[1:k]
  m2.BAYN <- lm(vY.BAYN ~ vX.BAYN)
  alpha.BAYN[i] <-m2.BAYN$coeff[1]
  beta.BAYN[i] <- m2.BAYN$coeff[2]
}

alpha.BAYN <- xts(alpha.BAYN, index(EX.BAYN)[(k0+1):(T)])
beta.BAYN <- xts(beta.BAYN, index(EX.BAYN)[(k0+1):(T)])
dygraph(alpha.BAYN, main = "Recursive Alpha: BAYN") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(beta.BAYN, main = "Recursive Beta: BAYN") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

Rolling:

# Rolling estimation for BAYN: fixed window of N = 100 observations moved
# forward one day at a time. T, k0, BAYN.X/BAYN.Y come from the block above.
N <- 100
alpha.BAYN <- beta.BAYN <- array(dim=(T-N+1))
for(i in 1:(T-N+1))
{
  # BUG FIX: i:(i+N) held N+1 observations and overran the series by one
  # element on the last iteration; i:(i+N-1) is exactly N and in range.
  vX.BAYN <- BAYN.X[i:(i+N-1)]
  vY.BAYN <- BAYN.Y[i:(i+N-1)]
  fit.roll.BAYN <- lm(vY.BAYN ~ vX.BAYN)  # local name: do not clobber m2.BAYN
  alpha.BAYN[i] <- fit.roll.BAYN$coeff[1]
  beta.BAYN[i] <- fit.roll.BAYN$coeff[2]
}

# Each estimate is dated at the last day of its window.
alpha.BAYN <- xts(alpha.BAYN, index(EX.BAYN)[(k0+1):(T)])
beta.BAYN <- xts(beta.BAYN, index(EX.BAYN)[(k0+1):(T)])
dygraph(alpha.BAYN, main = "Rolling Alpha: BAYN") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(beta.BAYN, main = "Rolling Beta: BAYN") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

5.3 BNP

Recursive:

# Recursive (expanding-window) CAPM estimation for BNP; same scheme as the
# ASML block: first window = 100 obs, grown by one obs per iteration.
# NOTE(review): the loop clobbers the global model object m2.BNP.
BNP.matrix <- cbind(EX.BNP, EX.MKT)
BNP.matrix <- coredata(BNP.matrix)

BNP.X <- BNP.matrix[,2]
BNP.Y <- BNP.matrix[,1]


k0 <- 99
T <- length(BNP.X)
alpha.BNP <- beta.BNP <- array(dim=(T-k0))
for(i in 1:(T-k0))
{
  k <- k0 + i
  vX.BNP <- BNP.X[1:k]
  vY.BNP <- BNP.Y[1:k]
  m2.BNP <- lm(vY.BNP ~ vX.BNP)
  alpha.BNP[i] <-m2.BNP$coeff[1]
  beta.BNP[i] <- m2.BNP$coeff[2]
}

alpha.BNP <- xts(alpha.BNP, index(EX.BNP)[(k0+1):(T)])
beta.BNP <- xts(beta.BNP, index(EX.BNP)[(k0+1):(T)])
dygraph(alpha.BNP, main = "Recursive Alpha: BNP") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(beta.BNP, main = "Recursive Beta: BNP") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

Rolling:

# Rolling estimation for BNP: fixed window of N = 100 observations moved
# forward one day at a time. T, k0, BNP.X/BNP.Y come from the block above.
N <- 100
alpha.BNP <- beta.BNP <- array(dim=(T-N+1))
for(i in 1:(T-N+1))
{
  # BUG FIX: i:(i+N) held N+1 observations and overran the series by one
  # element on the last iteration; i:(i+N-1) is exactly N and in range.
  vX.BNP <- BNP.X[i:(i+N-1)]
  vY.BNP <- BNP.Y[i:(i+N-1)]
  fit.roll.BNP <- lm(vY.BNP ~ vX.BNP)  # local name: do not clobber m2.BNP
  alpha.BNP[i] <- fit.roll.BNP$coeff[1]
  beta.BNP[i] <- fit.roll.BNP$coeff[2]
}

# Each estimate is dated at the last day of its window.
alpha.BNP <- xts(alpha.BNP, index(EX.BNP)[(k0+1):(T)])
beta.BNP <- xts(beta.BNP, index(EX.BNP)[(k0+1):(T)])
dygraph(alpha.BNP, main = "Rolling Alpha: BNP") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))
dygraph(beta.BNP, main = "Rolling Beta: BNP") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

5.4 Índice Sharpe

En el caso de comprobar la constancia o no del índice de Sharpe, vamos a proceder a calcular de forma recursiva el parámetro, de forma que se obtendrán distintos índices para cada periodo, comenzando en la observación 100.

De la misma forma se comprobará con Rolling

Recursive Sharpe-ASML:

# Recursive Sharpe ratio for ASML: expanding window starting at 100 obs,
# one Sharpe ratio per day, dated at the window's last day.
ASML.sh <- EX.ASML

k0 <- 99
T <- length(ASML.sh)
sharpe.ASML <- array(dim=(T-k0))
for(i in 1:(T-k0))
{
  k <- k0 + i
  v.ASML.sh <- ASML.sh[1:k]

  sharpe.ASML[i] <- SharpeRatio(v.ASML.sh, FUN = "StdDev")
}

sharpe.ASML <- xts(sharpe.ASML, index(EX.ASML)[(k0+1):(T)])
dygraph(sharpe.ASML, main = "Recursive Sharpe Ratio: ASML") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

Rolling Sharpe-ASML:

# Rolling Sharpe ratio for ASML: fixed window of k0+1 = 100 observations
# (indices i..k with k = k0+i), moved one day at a time.
# NOTE(review): this overwrites the recursive sharpe.ASML xts element-wise
# and then re-wraps it with xts(); it works because the lengths match, but
# re-initialising the array would be cleaner.
for(i in 1:(T-k0))
{
  k <- k0 + i
  v.ASML.sh <- ASML.sh[i:k]

  sharpe.ASML[i] <- SharpeRatio(v.ASML.sh, FUN = "StdDev")
}


sharpe.ASML <- xts(sharpe.ASML, index(EX.ASML)[(k0+1):(T)])
dygraph(sharpe.ASML, main = "Rolling Sharpe ratio: ASML") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

Recursive Sharpe-BAYN:

# Recursive Sharpe ratio for BAYN: expanding window starting at 100 obs.
BAYN.sh <- EX.BAYN

k0 <- 99
T <- length(BAYN.sh)
sharpe.BAYN <- array(dim=(T-k0))
for(i in 1:(T-k0))
{
  k <- k0 + i
  v.BAYN.sh <- BAYN.sh[1:k]

  sharpe.BAYN[i] <- SharpeRatio(v.BAYN.sh, FUN = "StdDev")
}

sharpe.BAYN <- xts(sharpe.BAYN, index(EX.BAYN)[(k0+1):(T)])
dygraph(sharpe.BAYN, main = "Recursive Sharpe ratio: BAYN") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

Rolling Sharpe-BAYN:

# Rolling Sharpe ratio for BAYN: fixed window of k0+1 = 100 observations
# (indices i..k with k = k0+i), moved one day at a time.
for(i in 1:(T-k0))
{
  k <- k0 + i
  v.BAYN.sh <- BAYN.sh[i:k]

  sharpe.BAYN[i] <- SharpeRatio(v.BAYN.sh, FUN = "StdDev")
}


# BUG FIX: the original wrapped sharpe.ASML here (copy-paste slip), so the
# "Rolling Sharpe ratio: BAYN" chart actually plotted the ASML series.
sharpe.BAYN <- xts(sharpe.BAYN, index(EX.BAYN)[(k0+1):(T)])
dygraph(sharpe.BAYN, main = "Rolling Sharpe ratio: BAYN") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

Recursive Sharpe-BNP:

# Recursive Sharpe ratio for BNP: expanding window starting at 100 obs.
BNP.sh <- EX.BNP

k0 <- 99
T <- length(BNP.sh)
sharpe.BNP <- array(dim=(T-k0))
for(i in 1:(T-k0))
{
  k <- k0 + i
  v.BNP.sh <- BNP.sh[1:k]

  sharpe.BNP[i] <- SharpeRatio(v.BNP.sh, FUN = "StdDev")
}

sharpe.BNP <- xts(sharpe.BNP, index(EX.BNP)[(k0+1):(T)])
dygraph(sharpe.BNP, main = "Recursive Sharpe ratio: BNP") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

Rolling Sharpe-BNP:

# Rolling Sharpe ratio for BNP: fixed window of k0+1 = 100 observations
# (indices i..k with k = k0+i), moved one day at a time.
for(i in 1:(T-k0))
{
  k <- k0 + i
  v.BNP.sh <- BNP.sh[i:k]
  
  sharpe.BNP[i] <- SharpeRatio(v.BNP.sh, FUN = "StdDev")
}


sharpe.BNP <- xts(sharpe.BNP, index(EX.BNP)[(k0+1):(T)])
dygraph(sharpe.BNP, main = "Rolling Sharpe ratio: BNP") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

5.5 Índice de Treynor

Dada la construcción del índice sabemos que se utiliza el parámetro beta para su cálculo y, por tanto, podemos aplicar la beta calculada mediante el procedimiento Rolling y demostrar que no es constante. Por otro lado, la rentabilidad también varía dependiendo del periodo estudiado y, dado que ni la beta ni la rentabilidad son constantes, la ratio de Treynor tampoco lo será.

Veamos el enfoque de utilizar la rentabilidad media y la beta de Rolling:

5.5.1 Índice de Treynor-ASML

# Treynor ratio with the time-varying beta series computed in section 5.1.
# BUG FIX: the chart title said "Recursive beta", but beta.ASML holds the
# ROLLING estimates at this point (it was last assigned in the rolling
# block), which is also what the text above announces.
trey.ASML <-mean(rASML)/beta.ASML

dygraph(trey.ASML, main = "Treynor Ratio con Rolling beta de ASML") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

5.5.2 Índice de Treynor- BAYN

# Treynor ratio with the time-varying beta series computed in section 5.2.
# BUG FIX: title corrected from "Recursive" to "Rolling" — beta.BAYN holds
# the rolling estimates at this point.
trey.BAYN <-mean(rBAYN)/beta.BAYN

dygraph(trey.BAYN, main = "Treynor Ratio con Rolling beta de BAYN") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

5.5.3 Índice de Treynor- BNP

# Treynor ratio with the time-varying beta series computed in section 5.3.
# BUG FIX: title corrected from "Recursive" to "Rolling" — beta.BNP holds
# the rolling estimates at this point.
trey.BNP <-mean(rBNP)/beta.BNP

dygraph(trey.BNP, main = "Treynor Ratio con Rolling beta de BNP") %>% dyRangeSelector(dateWindow = c("2008-01-01", "2017-04-01"))

5.6 Alpha de Jensen

Se trata del alpha representado en los modelos CAPM de los activos; por tanto, se demuestra también que dicho parámetro no es constante a lo largo del tiempo.

6 Apartado 4: problemas de heteroscedasticidad condicional?

En este apartado analizaremos si los modelos elegidos en el apartado anterior presentan problemas de heteroscedasticidad condicional.

Hay que señalar que todos los modelos estimados contienen un análisis de los residuos; veamos los casos para cada activo del modelo seleccionado.

6.1 ASML

# Conditional-heteroskedasticity check for the selected ASML model (CAPM3):
# repeats the section-4.1 diagnostics; neither test rejects, so no ARCH.
ggAcf(uhat3.ASML) + labs(title="Residuos de  CAPM3")

ggAcf(uhat23.ASML) + labs(title="Residuos al cuadrado de CAPM3")

Box.test(uhat23.ASML, lag=12, type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat23.ASML
## X-squared = 3.3437, df = 12, p-value = 0.9926
ArchTest(uhat3.ASML)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat3.ASML
## Chi-squared = 3.1998, df = 12, p-value = 0.994

Dados los valores de los contrastes y contemplando los residuos de este modelo, observamos que no hay correlación entre los retardos de los errores y, además, el test ARCH no indica esa estructura.

6.2 BAYN

# Conditional-heteroskedasticity check for the selected BAYN model (CAPM2):
# ACF of residuals and of squared residuals, then formal tests.
ggAcf(uhat2.BAYN) + labs(title="Residuos CAPM2")

ggAcf(uhat22.BAYN) + labs(title="Residuos al cuadrado de CAPM2")

# BUG FIX: the original squared uhat22.BAYN again (i.e. ran the Ljung-Box
# test on residuals to the FOURTH power), which returned p = 1 and
# contradicted the ARCH-LM rejection below. The test must run on the
# squared residuals; it then matches section 4.2.2
# (X-squared = 23.817, df = 12, p-value = 0.02154). Re-knit to refresh output.
Box.test(uhat22.BAYN, lag=12, type="Ljung-Box")
ArchTest(uhat2.BAYN)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat2.BAYN
## Chi-squared = 23.286, df = 12, p-value = 0.02539

En este caso observamos la no autocorrelación en los errores, pero sí los efectos ARCH. Dado que tenemos valores atípicos elevados, es posible que tras ellos esté algún tipo de estructura.

Vamos a ajustar el ARIMA que contengan los residuos:

# Exhaustive ARFIMA order search (AR up to 2, MA up to 2) by AIC.
# NOTE(review): despite the variable name, this is fitted to EX.BAYN (the
# excess-return series), not to the CAPM2 residuals — confirm intent.
fit.uhat2.BAYN<- autoarfima(data =EX.BAYN, ar.max = 2, ma.max = 2, 
criterion = "AIC", method = "full")

fit.uhat2.BAYN
## $fit
## 
## *----------------------------------*
## *          ARFIMA Model Fit        *
## *----------------------------------*
## Mean Model   : ARFIMA(1,0,1)
## Distribution : norm 
## 
## Optimal Parameters
## ------------------------------------
##        Estimate  Std. Error  t value Pr(>|t|)
## mu     0.042632    0.029731   1.4339 0.151591
## ar1    0.472870    0.211502   2.2358 0.025367
## ma1   -0.542299    0.201938  -2.6855 0.007243
## sigma  1.825515    0.024209  75.4055 0.000000
## 
## Robust Standard Errors:
##        Estimate  Std. Error  t value Pr(>|t|)
## mu     0.042632    0.029348   1.4526  0.14633
## ar1    0.472870    0.350357   1.3497  0.17712
## ma1   -0.542299    0.336461  -1.6118  0.10701
## sigma  1.825515    0.097962  18.6349  0.00000
## 
## LogLikelihood : -5745.135 
## 
## Information Criteria
## ------------------------------------
##                    
## Akaike       4.0444
## Bayes        4.0528
## Shibata      4.0444
## Hannan-Quinn 4.0474
## 
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                    0.02804  0.8670
## Lag[2*(p+q)+(p+q)-1][5]   1.19688  0.9999
## Lag[4*(p+q)+(p+q)-1][9]   5.27827  0.3925
## 
## H0 : No serial correlation
## 
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
##                         statistic   p-value
## Lag[1]                      18.67 1.557e-05
## Lag[2*(p+q)+(p+q)-1][2]     22.01 1.320e-06
## Lag[4*(p+q)+(p+q)-1][5]     31.22 1.429e-08
## 
## 
## ARCH LM Tests
## ------------------------------------
##              Statistic DoF   P-Value
## ARCH Lag[2]      23.66   2 7.280e-06
## ARCH Lag[5]      35.35   5 1.282e-06
## ARCH Lag[10]     43.82  10 3.552e-06
## 
## Nyblom stability test
## ------------------------------------
## Joint Statistic:  0.8551
## Individual Statistics:             
## mu    0.04045
## ar1   0.04003
## ma1   0.03835
## sigma 0.67980
## 
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic:          1.07 1.24 1.6
## Individual Statistic:     0.35 0.47 0.75
## 
## 
## Elapsed time : 0.09501505 
## 
## 
## $rank.matrix
##    ar1 ar2 ma1 ma2 im arf      AIC converged
## 1    1   0   1   0  1   0 4.044415         1
## 2    1   0   1   0  0   0 4.044430         1
## 3    0   0   1   0  0   0 4.044691         1
## 4    1   0   1   1  1   0 4.044717         1
## 5    1   1   1   0  1   0 4.044735         1
## 6    0   0   1   0  1   0 4.044764         1
## 7    1   1   0   1  1   0 4.044783         1
## 8    0   1   1   1  1   0 4.044792         1
## 9    0   1   1   0  0   0 4.044793         1
## 10   0   0   1   1  0   0 4.044794         1
## 11   1   1   0   1  0   0 4.044822         1
## 12   0   0   1   1  1   0 4.044831         1
## 13   0   1   1   0  1   0 4.044832         1
## 14   0   1   1   1  0   0 4.044835         1
## 15   1   0   0   1  0   0 4.044863         1
## 16   1   1   0   0  0   0 4.044873         1
## 17   1   0   0   1  1   0 4.044905         1
## 18   1   1   0   0  1   0 4.044918         1
## 19   1   1   1   0  0   0 4.044923         1
## 20   1   0   0   0  0   0 4.044952         1
## 21   1   0   1   1  0   0 4.044991         1
## 22   1   0   0   0  1   0 4.045037         1
## 23   1   1   1   1  1   0 4.045486         1
## 24   1   1   1   1  0   0 4.045524         1
## 25   0   1   0   1  0   0 4.048993         1
## 26   0   1   0   1  1   0 4.049145         1
## 27   0   0   0   0  1   0 4.049197         1
## 28   0   1   0   0  0   0 4.049250         1
## 29   0   0   0   1  0   0 4.049262         1
## 30   0   1   0   0  1   0 4.049390         1
## 31   0   0   0   1  1   0 4.049403         1

Se trata de un ARIMA(1,0,1)

Ajustemos el GARCH con EX.MKT como variable exógena creando el CAPM:

# CAPM-in-mean GARCH for BAYN: ARMA(1,1) mean equation with EX.MKT as an
# external regressor (the CAPM part), apARCH(1,1) variance equation, and
# Johnson SU ("jsu") innovation distribution.
# NOTE(review): rugarch documents external.regressors as a matrix; passing
# the xts EX.MKT directly appears to work here — confirm against ugarchspec().
garch11.BAYN.spec <- ugarchspec(variance.model = list(garchOrder=c(1,1),
                                                      model= "apARCH"), 
                             mean.model = list(armaOrder=c(1,1),
                                               external.regressors=EX.MKT),
                             distribution.model="jsu")
EX.BAYN.garch11.fit <- ugarchfit(spec=garch11.BAYN.spec, data=EX.BAYN,
                               solver.control=list(trace = 1))   
## 
## Iter: 1 fn: 4324.9198     Pars:   0.04890  0.98232 -0.98628  0.92335  0.01272  0.03313  0.96534  0.17555  1.01847  0.11691  1.41445
## Iter: 2 fn: 4324.9198     Pars:   0.04890  0.98232 -0.98628  0.92335  0.01271  0.03313  0.96535  0.17545  1.01826  0.11691  1.41444
## solnp--> Completed in 2 iterations
EX.BAYN.garch11.fit
## 
## *---------------------------------*
## *          GARCH Model Fit        *
## *---------------------------------*
## 
## Conditional Variance Dynamics    
## -----------------------------------
## GARCH Model  : apARCH(1,1)
## Mean Model   : ARFIMA(1,0,1)
## Distribution : jsu 
## 
## Optimal Parameters
## ------------------------------------
##         Estimate  Std. Error     t value Pr(>|t|)
## mu      0.048902    0.003271     14.9502 0.000000
## ar1     0.982320    0.003903    251.6852 0.000000
## ma1    -0.986280    0.000045 -21688.3685 0.000000
## mxreg1  0.923350    0.017140     53.8699 0.000000
## omega   0.012710    0.002958      4.2964 0.000017
## alpha1  0.033125    0.003170     10.4509 0.000000
## beta1   0.965346    0.000587   1645.0947 0.000000
## gamma1  0.175454    0.157252      1.1157 0.264530
## delta   1.018261    0.072209     14.1016 0.000000
## skew    0.116909    0.048795      2.3959 0.016579
## shape   1.414444    0.065333     21.6498 0.000000
## 
## Robust Standard Errors:
##         Estimate  Std. Error     t value Pr(>|t|)
## mu      0.048902    0.004196  1.1654e+01 0.000000
## ar1     0.982320    0.004113  2.3882e+02 0.000000
## ma1    -0.986280    0.000045 -2.1776e+04 0.000000
## mxreg1  0.923350    0.031205  2.9590e+01 0.000000
## omega   0.012710    0.003526  3.6048e+00 0.000312
## alpha1  0.033125    0.004721  7.0160e+00 0.000000
## beta1   0.965346    0.000773  1.2487e+03 0.000000
## gamma1  0.175454    0.184236  9.5233e-01 0.340929
## delta   1.018261    0.039754  2.5614e+01 0.000000
## skew    0.116909    0.050748  2.3037e+00 0.021240
## shape   1.414444    0.086428  1.6366e+01 0.000000
## 
## LogLikelihood : -4324.92 
## 
## Information Criteria
## ------------------------------------
##                    
## Akaike       3.0502
## Bayes        3.0733
## Shibata      3.0502
## Hannan-Quinn 3.0585
## 
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                      2.968 0.08494
## Lag[2*(p+q)+(p+q)-1][5]     3.903 0.08468
## Lag[4*(p+q)+(p+q)-1][9]     5.278 0.39257
## d.o.f=2
## H0 : No serial correlation
## 
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
##                         statistic   p-value
## Lag[1]                      18.17 2.020e-05
## Lag[2*(p+q)+(p+q)-1][5]     18.41 5.281e-05
## Lag[4*(p+q)+(p+q)-1][9]     18.68 4.364e-04
## d.o.f=2
## 
## Weighted ARCH LM Tests
## ------------------------------------
##             Statistic Shape Scale P-Value
## ARCH Lag[3]    0.3254 0.500 2.000  0.5684
## ARCH Lag[5]    0.3742 1.440 1.667  0.9200
## ARCH Lag[7]    0.4798 2.315 1.543  0.9802
## 
## Nyblom stability test
## ------------------------------------
## Joint Statistic:  5.3931
## Individual Statistics:              
## mu     0.25792
## ar1    0.10757
## ma1    0.10634
## mxreg1 3.40026
## omega  0.62073
## alpha1 0.63672
## beta1  0.61846
## gamma1 0.03871
## delta  0.23885
## skew   0.22323
## shape  0.18656
## 
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic:          2.49 2.75 3.27
## Individual Statistic:     0.35 0.47 0.75
## 
## Sign Bias Test
## ------------------------------------
##                    t-value      prob sig
## Sign Bias           0.3800 0.7039559    
## Negative Sign Bias  0.7169 0.4734927    
## Positive Sign Bias  3.3129 0.0009348 ***
## Joint Effect       12.2569 0.0065531 ***
## 
## 
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
##   group statistic p-value(g-1)
## 1    20     20.01       0.3939
## 2    30     23.61       0.7480
## 3    40     34.99       0.6534
## 4    50     49.42       0.4563
## 
## 
## Elapsed time : 32.42306

Veamos la comparación entre los parámetros obtenidos en el modelo CAPM y el GARCH(1,1):

# Collect fitted GARCH coefficients to compare with the CAPM estimates.
# NOTE(review): for this model coef()[6]/coef()[7] are alpha1/beta1 of the
# VARIANCE equation (per the Nyblom table: mu, ar1, ma1, mxreg1, omega,
# alpha1, beta1, ...), not the mean-equation intercept (mu) and market beta
# (mxreg1) — confirm the intended indices.
alpha.BAYN.GARCH <-as.data.frame(coef(EX.BAYN.garch11.fit)[6])
beta.BAYN.GARCH <- as.data.frame(coef(EX.BAYN.garch11.fit)[7])
garch.parmeters.BAYN <- cbind(alpha.BAYN.GARCH,beta.BAYN.GARCH, alpha2.BAYN,beta2.BAYN)
colnames(garch.parmeters.BAYN)<- c("Alpha-GARCH", "Beta-GARCH", "Alpha-CAPM2", "Beta-CAPM2")
rownames(garch.parmeters.BAYN)<- c("Parámetros")
garch.parmeters.BAYN
##            Alpha-GARCH Beta-GARCH Alpha-CAPM2 Beta-CAPM2
## Parámetros  0.03312527   0.965346  0.05737158  0.8602435

Como podemos observar, no hay cambio sustancial.

Veamos los errores del GARCH y su sigma.

# Residuals of the BAYN GARCH fit and their squares, for ARCH diagnostics.
uhat.garch.BAYN <- residuals(EX.BAYN.garch11.fit)
uhat.garch.BAYN2 <- uhat.garch.BAYN*uhat.garch.BAYN

# ARCH LM test: H0 = no remaining ARCH effects in the residuals.
ArchTest(uhat.garch.BAYN)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat.garch.BAYN
## Chi-squared = 24.424, df = 12, p-value = 0.0178
Box.test(uhat.garch.BAYN2,lag=12, type = "Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat.garch.BAYN2
## X-squared = 25.015, df = 12, p-value = 0.01475
# Residual series with a zero reference line.
plot.ts(residuals(EX.BAYN.garch11.fit), ylab="e(t)", col="blue")
abline(h=0)

# Estimated conditional volatility sigma(t).
plot.ts(sigma(EX.BAYN.garch11.fit), ylab="sigma(t)", col="blue")

# rugarch diagnostic plots (plot menu): 9 = QQ-plot of standardized
# residuals, 10/11 = ACF of (squared) standardized residuals,
# 12 = news impact curve.
plot(EX.BAYN.garch11.fit, which=9)

plot(EX.BAYN.garch11.fit, which=10)

plot(EX.BAYN.garch11.fit, which=11)

plot(EX.BAYN.garch11.fit, which=12)

La ACF parece ser una normal por lo que nos quedamos con ese modelo, aunque la ACF de los residuos presenta elevados valores atípicos. El primer lag significativo no desaparece al ajustar los distintos ARIMA.

6.2.1 índices BAYN

# Performance ratios under the GARCH model.
# Sharpe: mean excess return over mean conditional volatility.
sharpe.garch.BAYN <-  mean(EX.BAYN)/mean(sigma(EX.BAYN.garch11.fit))
# NOTE(review): coef()[6] is alpha1 of the variance equation; Treynor usually
# divides by the market beta (mxreg1 = coef()[4]) — confirm intended index.
treynor.garch.BAYN <- mean(EX.BAYN)/as.data.frame(coef(EX.BAYN.garch11.fit)[6])
# NOTE(review): coef()[5] is omega (variance intercept); Jensen's alpha is
# usually the mean-equation intercept mu (coef()[1]) — confirm intended index.
jensen.garch.BAYN <- as.data.frame(coef(EX.BAYN.garch11.fit)[5])

# CAPM vs GARCH ratios side by side in one comparison table.
ind.capm.BAYN<- cbind(sharpe.ratio.BAYN,treynor.ratio.BAYN,jensen.ratio.BAYN)
colnames(ind.capm.BAYN) <- c("Sharpe", "Treynor", "Jensen")
ind.garch.BAYN<- cbind(sharpe.garch.BAYN,treynor.garch.BAYN,jensen.garch.BAYN)
colnames(ind.garch.BAYN) <- c("Sharpe", "Treynor", "Jensen")
compare.index.BAYN <- rbind(ind.capm.BAYN,ind.garch.BAYN)
rownames(compare.index.BAYN)<- c("Ratios CAPM", "Ratios GARCH")
compare.index.BAYN
##                  Sharpe    Treynor     Jensen
## Ratios CAPM  0.02318896 0.04937026 0.05737158
## Ratios GARCH 0.03535170 1.28211641 0.01271041

Se observan diferencias significativas entre las distintas ratios.

6.3 BNP

# ACF of the BNP CAPM2 residuals and of their squares.
ggAcf(uhat2.BNP) + labs(title="Residuos CAPM2")

ggAcf(uhat22.BNP) + labs(title="Residuos al cuadrado de CAPM2")

# NOTE(review): if uhat22.BNP already holds squared residuals (its title
# above says so), squaring again tests the 4th power — confirm intent.
uhat22.BNP2 <- uhat22.BNP*uhat22.BNP
Box.test(uhat22.BNP2, lag=12, type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  uhat22.BNP2
## X-squared = 35.358, df = 12, p-value = 0.0004105
ArchTest(uhat2.BNP)
## 
##  ARCH LM-test; Null hypothesis: no ARCH effects
## 
## data:  uhat2.BNP
## Chi-squared = 386.48, df = 12, p-value < 2.2e-16

En este caso observamos tanto la autocorrelación en los errores como los efectos ARCH.

Vamos a ajustar el ARIMA que contienen los residuos:

# Exhaustive ARMA order search (ar, ma up to 3) selected by AIC.
# NOTE(review): despite the name, this is fit on EX.BNP (the excess-return
# series), not on the CAPM residuals — confirm which was intended.
fit.uhat2.BNP<- autoarfima(data =EX.BNP, ar.max = 3, ma.max = 3, 
                            criterion = "AIC", method = "full")

fit.uhat2.BNP
## $fit
## 
## *----------------------------------*
## *          ARFIMA Model Fit        *
## *----------------------------------*
## Mean Model   : ARFIMA(1,0,3)
## Distribution : norm 
## 
## Optimal Parameters
## ------------------------------------
##        Estimate  Std. Error  t value Pr(>|t|)
## ar1    0.532643    0.127180   4.1881  2.8e-05
## ma1   -0.522312    0.127628  -4.0925  4.3e-05
## ma2    0.000000          NA       NA       NA
## ma3   -0.098662    0.019702  -5.0077  1.0e-06
## sigma  2.544527    0.033769  75.3515  0.0e+00
## 
## Robust Standard Errors:
##        Estimate  Std. Error  t value Pr(>|t|)
## ar1    0.532643    0.224869   2.3687 0.017852
## ma1   -0.522312    0.231260  -2.2585 0.023911
## ma2    0.000000          NA       NA       NA
## ma3   -0.098662    0.039977  -2.4679 0.013590
## sigma  2.544527    0.077320  32.9089 0.000000
## 
## LogLikelihood : -6687.889 
## 
## Information Criteria
## ------------------------------------
##                    
## Akaike       4.7076
## Bayes        4.7160
## Shibata      4.7076
## Hannan-Quinn 4.7106
## 
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
##                          statistic p-value
## Lag[1]                    0.008781  0.9253
## Lag[2*(p+q)+(p+q)-1][11]  3.457545  1.0000
## Lag[4*(p+q)+(p+q)-1][19] 10.233338  0.4299
## 
## H0 : No serial correlation
## 
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                      76.63       0
## Lag[2*(p+q)+(p+q)-1][2]    156.14       0
## Lag[4*(p+q)+(p+q)-1][5]    346.90       0
## 
## 
## ARCH LM Tests
## ------------------------------------
##              Statistic DoF P-Value
## ARCH Lag[2]      204.5   2       0
## ARCH Lag[5]      319.3   5       0
## ARCH Lag[10]     356.5  10       0
## 
## Nyblom stability test
## ------------------------------------
## Joint Statistic:  2.8013
## Individual Statistics:             
## ar1   0.02600
## ma1   0.01911
## ma3   0.02290
## sigma 2.59563
## 
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic:          1.07 1.24 1.6
## Individual Statistic:     0.35 0.47 0.75
## 
## 
## Elapsed time : 0.06283402 
## 
## 
## $rank.matrix
##     ar1 ar2 ar3 ma1 ma2 ma3 im arf      AIC converged
## 1     1   0   0   1   0   1  0   0 4.707625         1
## 2     0   1   0   0   1   1  0   0 4.707753         1
## 3     1   0   1   1   0   1  0   0 4.707929         1
## 4     0   1   1   0   1   0  0   0 4.708082         1
## 5     1   0   1   1   0   0  0   0 4.708161         1
## 6     1   1   1   1   1   0  0   0 4.708166         1
## 7     0   0   1   0   0   1  0   0 4.708187         1
## 8     1   1   0   1   1   1  0   0 4.708197         1
## 9     1   1   0   1   0   1  0   0 4.708317         1
## 10    1   0   0   1   0   1  1   0 4.708319         1
## 11    1   0   0   1   1   1  0   0 4.708328         1
## 12    0   0   0   0   0   1  0   0 4.708367         1
## 13    1   1   0   0   1   1  0   0 4.708376         1
## 14    0   1   0   1   1   1  0   0 4.708425         1
## 15    0   1   1   0   1   1  0   0 4.708429         1
## 16    0   1   0   0   1   1  1   0 4.708447         1
## 17    1   0   1   1   0   1  1   0 4.708623         1
## 18    1   0   1   1   1   1  0   0 4.708632         1
## 19    1   1   1   1   0   1  0   0 4.708632         1
## 20    1   1   1   0   1   0  0   0 4.708641         1
## 21    0   1   1   1   1   0  0   0 4.708735         1
## 22    0   1   1   0   1   0  1   0 4.708777         1
## 23    1   1   1   1   0   0  0   0 4.708855         1
## 24    1   0   1   1   0   0  1   0 4.708856         1
## 25    1   1   1   1   1   0  1   0 4.708861         1
## 26    1   0   1   1   1   0  0   0 4.708864         1
## 27    1   1   1   1   1   1  0   0 4.708869         1
## 28    0   0   1   0   1   1  0   0 4.708874         1
## 29    0   0   1   0   0   1  1   0 4.708882         1
## 30    1   0   1   0   0   1  0   0 4.708884         1
## 31    0   0   1   1   0   1  0   0 4.708888         1
## 32    0   1   1   0   0   1  0   0 4.708890         1
## 33    1   1   0   1   1   1  1   0 4.708892         1
## 34    1   0   0   0   0   1  0   0 4.708940         1
## 35    0   1   0   0   0   1  0   0 4.708989         1
## 36    0   0   0   1   0   1  0   0 4.708998         1
## 37    1   1   0   1   0   1  1   0 4.709011         1
## 38    1   0   0   1   1   1  1   0 4.709024         1
## 39    0   0   0   0   1   1  0   0 4.709040         1
## 40    0   0   0   0   0   1  1   0 4.709063         1
## 41    1   1   0   0   1   1  1   0 4.709071         1
## 42    1   1   1   0   1   1  0   0 4.709077         1
## 43    0   0   1   0   0   0  0   0 4.709098         1
## 44    0   1   1   1   1   1  0   0 4.709119         1
## 45    0   1   0   1   1   1  1   0 4.709119         1
## 46    0   1   1   0   1   1  1   0 4.709123         1
## 47    1   0   1   1   1   1  1   0 4.709326         1
## 48    1   1   1   1   0   1  1   0 4.709326         1
## 49    1   1   1   0   1   0  1   0 4.709336         1
## 50    0   1   1   1   1   0  1   0 4.709430         1
## 51    1   1   1   1   0   0  1   0 4.709549         1
## 52    1   0   1   1   1   0  1   0 4.709558         1
## 53    1   1   1   1   1   1  1   0 4.709563         1
## 54    1   1   0   0   0   1  0   0 4.709564         1
## 55    0   0   1   0   1   1  1   0 4.709569         1
## 56    1   0   1   0   1   1  0   0 4.709572         1
## 57    0   0   1   1   1   1  0   0 4.709573         1
## 58    1   0   1   0   0   1  1   0 4.709579         1
## 59    0   0   1   1   0   1  1   0 4.709583         1
## 60    0   1   1   0   0   1  1   0 4.709586         1
## 61    1   1   1   0   0   1  0   0 4.709587         1
## 62    0   1   1   1   0   1  0   0 4.709591         1
## 63    1   0   1   0   0   0  0   0 4.709603         1
## 64    1   0   0   0   1   1  0   0 4.709612         1
## 65    0   1   0   1   0   1  0   0 4.709618         1
## 66    1   0   0   0   0   1  1   0 4.709636         1
## 67    0   1   1   0   0   0  0   0 4.709643         1
## 68    0   0   1   1   0   0  0   0 4.709653         1
## 69    0   0   0   1   1   1  0   0 4.709668         1
## 70    0   1   0   0   0   1  1   0 4.709685         1
## 71    0   0   0   1   0   1  1   0 4.709693         1
## 72    0   0   1   0   1   0  0   0 4.709710         1
## 73    0   0   0   0   1   1  1   0 4.709735         1
## 74    1   1   1   0   1   1  1   0 4.709771         1
## 75    0   0   1   0   0   0  1   0 4.709794         1
## 76    0   1   1   1   1   1  1   0 4.709813         1
## 77    1   1   1   0   0   0  0   0 4.710154         1
## 78    0   1   1   1   0   0  0   0 4.710199         1
## 79    1   0   1   0   1   0  0   0 4.710219         1
## 80    1   1   0   0   0   1  1   0 4.710260         1
## 81    0   0   1   1   1   0  0   0 4.710264         1
## 82    1   0   1   0   1   1  1   0 4.710268         1
## 83    0   0   1   1   1   1  1   0 4.710269         1
## 84    1   1   1   0   0   1  1   0 4.710283         1
## 85    0   1   1   1   0   1  1   0 4.710286         1
## 86    1   0   1   0   0   0  1   0 4.710300         1
## 87    1   0   0   0   1   1  1   0 4.710309         1
## 88    0   1   0   1   0   1  1   0 4.710314         1
## 89    0   1   1   0   0   0  1   0 4.710340         1
## 90    0   0   1   1   0   0  1   0 4.710349         1
## 91    0   0   0   1   1   1  1   0 4.710363         1
## 92    0   0   1   0   1   0  1   0 4.710407         1
## 93    1   1   1   0   0   0  1   0 4.710850         1
## 94    0   1   1   1   0   0  1   0 4.710896         1
## 95    1   0   1   0   1   0  1   0 4.710915         1
## 96    0   0   1   1   1   0  1   0 4.710960         1
## 97    1   1   0   1   1   0  0   0 4.715757         1
## 98    1   0   0   1   1   0  0   0 4.716227         1
## 99    1   1   0   1   1   0  1   0 4.716455         1
## 100   1   1   0   1   0   0  0   0 4.716496         1
## 101   0   1   0   1   1   0  0   0 4.716772         1
## 102   1   0   0   1   1   0  1   0 4.716925         1
## 103   1   1   0   0   1   0  0   0 4.716945         1
## 104   1   1   0   1   0   0  1   0 4.717193         1
## 105   0   1   0   1   1   0  1   0 4.717469         1
## 106   1   1   0   0   1   0  1   0 4.717643         1
## 107   1   0   0   0   0   0  0   0 4.718466         1
## 108   0   0   0   1   0   0  0   0 4.718470         1
## 109   0   0   0   0   1   0  0   0 4.718498         1
## 110   0   1   0   0   0   0  0   0 4.718504         1
## 111   0   0   0   0   0   0  1   0 4.718625         1
## 112   0   0   0   1   1   0  0   0 4.718970         1
## 113   1   0   0   0   1   0  0   0 4.719009         1
## 114   0   1   0   1   0   0  0   0 4.719015         1
## 115   1   1   0   0   0   0  0   0 4.719045         1
## 116   0   1   0   0   1   0  0   0 4.719140         1
## 117   1   0   0   0   0   0  1   0 4.719164         1
## 118   0   0   0   1   0   0  1   0 4.719168         1
## 119   1   0   0   1   0   0  0   0 4.719171         1
## 120   0   0   0   0   1   0  1   0 4.719196         1
## 121   0   1   0   0   0   0  1   0 4.719201         1
## 122   0   0   0   1   1   0  1   0 4.719667         1
## 123   1   0   0   0   1   0  1   0 4.719707         1
## 124   0   1   0   1   0   0  1   0 4.719712         1
## 125   1   1   0   0   0   0  1   0 4.719743         1
## 126   0   1   0   0   1   0  1   0 4.719838         1
## 127   1   0   0   1   0   0  1   0 4.719868         1

Se trata de un ARIMA(1,0,3).

Ajustemos el GARCH con EX.MKT como variable exógena creando el CAPM:

Nota: aunque el ajuste autoarfima marca un ARIMA(1,0,3), no todos los parámetros son significativos; tras reajustes se observa que el parámetro AR(1) sí es significativo.

# CAPM-style GARCH for BNP: AR(1) mean equation with the market excess
# return (EX.MKT) as external regressor, sGARCH(1,1) variance and skewed
# Student-t innovations.
garch11.BNP.spec <- ugarchspec(variance.model = list(garchOrder=c(1,1)), 
                                mean.model = list(armaOrder=c(1,0),
                                                  external.regressors=EX.MKT),
                                distribution.model="sstd")
# trace = 1 prints the solver iterations shown below.
EX.BNP.garch11.fit <- ugarchfit(spec=garch11.BNP.spec, data=EX.BNP,
                                 solver.control=list(trace = 1))   
## 
## Iter: 1 fn: 4359.2513     Pars:  0.02875 0.05359 1.31046 0.01409 0.07319 0.92374 1.08822 4.75504
## Iter: 2 fn: 4359.2513     Pars:  0.02872 0.05359 1.31046 0.01408 0.07320 0.92375 1.08817 4.75471
## solnp--> Completed in 2 iterations
EX.BNP.garch11.fit
## 
## *---------------------------------*
## *          GARCH Model Fit        *
## *---------------------------------*
## 
## Conditional Variance Dynamics    
## -----------------------------------
## GARCH Model  : sGARCH(1,1)
## Mean Model   : ARFIMA(1,0,0)
## Distribution : sstd 
## 
## Optimal Parameters
## ------------------------------------
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.028720    0.019539   1.4699 0.141585
## ar1     0.053588    0.018446   2.9051 0.003671
## mxreg1  1.310459    0.016890  77.5868 0.000000
## omega   0.014076    0.005341   2.6352 0.008408
## alpha1  0.073198    0.013062   5.6040 0.000000
## beta1   0.923747    0.013074  70.6543 0.000000
## skew    1.088174    0.028134  38.6781 0.000000
## shape   4.754711    0.436213  10.9000 0.000000
## 
## Robust Standard Errors:
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.028720    0.019686   1.4589 0.144597
## ar1     0.053588    0.018170   2.9492 0.003186
## mxreg1  1.310459    0.029471  44.4661 0.000000
## omega   0.014076    0.006451   2.1818 0.029121
## alpha1  0.073198    0.019440   3.7654 0.000166
## beta1   0.923747    0.019089  48.3905 0.000000
## skew    1.088174    0.030460  35.7253 0.000000
## shape   4.754711    0.467590  10.1686 0.000000
## 
## LogLikelihood : -4359.251 
## 
## Information Criteria
## ------------------------------------
##                    
## Akaike       3.0723
## Bayes        3.0890
## Shibata      3.0723
## Hannan-Quinn 3.0783
## 
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                     0.8355  0.3607
## Lag[2*(p+q)+(p+q)-1][2]    0.8366  0.8326
## Lag[4*(p+q)+(p+q)-1][5]    1.7988  0.7683
## d.o.f=1
## H0 : No serial correlation
## 
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                     0.4998  0.4796
## Lag[2*(p+q)+(p+q)-1][5]    1.5763  0.7211
## Lag[4*(p+q)+(p+q)-1][9]    2.6114  0.8210
## d.o.f=2
## 
## Weighted ARCH LM Tests
## ------------------------------------
##             Statistic Shape Scale P-Value
## ARCH Lag[3]    0.1723 0.500 2.000  0.6781
## ARCH Lag[5]    1.4603 1.440 1.667  0.6028
## ARCH Lag[7]    1.8567 2.315 1.543  0.7476
## 
## Nyblom stability test
## ------------------------------------
## Joint Statistic:  4.5771
## Individual Statistics:              
## mu     0.08178
## ar1    0.21932
## mxreg1 1.64957
## omega  1.06128
## alpha1 0.52974
## beta1  0.76913
## skew   0.41568
## shape  1.40195
## 
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic:          1.89 2.11 2.59
## Individual Statistic:     0.35 0.47 0.75
## 
## Sign Bias Test
## ------------------------------------
##                    t-value    prob sig
## Sign Bias          0.79491 0.42673    
## Negative Sign Bias 1.77705 0.07567   *
## Positive Sign Bias 0.05616 0.95522    
## Joint Effect       3.16490 0.36689    
## 
## 
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
##   group statistic p-value(g-1)
## 1    20     4.858       0.9995
## 2    30    15.210       0.9833
## 3    40    32.343       0.7657
## 4    50    38.024       0.8720
## 
## 
## Elapsed time : 14.04797

Veamos la comparación entre los parámetros obtenidos en el modelo CAPM y el GARCH(1,1):

# Collect fitted GARCH coefficients to compare with the CAPM estimates.
# NOTE(review): for this model coef()[5]/coef()[6] are alpha1/beta1 of the
# VARIANCE equation (coef order: mu, ar1, mxreg1, omega, alpha1, beta1,
# skew, shape), not the mean-equation intercept and market beta (mxreg1)
# — confirm the intended indices.
alpha.BNP.GARCH <-as.data.frame(coef(EX.BNP.garch11.fit)[5])
beta.BNP.GARCH <- as.data.frame(coef(EX.BNP.garch11.fit)[6])
garch.parmeters.BNP <- cbind(alpha.BNP.GARCH,beta.BNP.GARCH, alpha2.BNP,beta2.BNP)
colnames(garch.parmeters.BNP)<- c("Alpha-GARCH", "Beta-GARCH", "Alpha-CAPM2", "Beta-CAPM2")
rownames(garch.parmeters.BNP)<- c("Parámetros")
garch.parmeters.BNP
##            Alpha-GARCH Beta-GARCH Alpha-CAPM2 Beta-CAPM2
## Parámetros  0.07319824  0.9237467  0.03096202   1.421348
# rugarch diagnostic plots (plot menu): 9 = QQ-plot of standardized
# residuals, 10/11 = ACF of (squared) standardized residuals,
# 12 = news impact curve.
plot(EX.BNP.garch11.fit, which=9)

plot(EX.BNP.garch11.fit, which=10)

plot(EX.BNP.garch11.fit, which=11)

plot(EX.BNP.garch11.fit, which=12)

En este caso observamos que la ACF parece ser ruido blanco.

6.3.1 índices BNP

# Performance ratios under the GARCH model.
# Sharpe: mean excess return over mean conditional volatility.
sharpe.garch.BNP <-  mean(EX.BNP)/mean(sigma(EX.BNP.garch11.fit))
# NOTE(review): coef()[6] is beta1 of the variance equation; Treynor usually
# divides by the market beta (mxreg1 = coef()[3]) — confirm intended index.
treynor.garch.BNP <- mean(EX.BNP)/as.data.frame(coef(EX.BNP.garch11.fit)[6])
# NOTE(review): coef()[5] is alpha1 (variance ARCH term); Jensen's alpha is
# usually the mean-equation intercept mu (coef()[1]) — confirm intended index.
jensen.garch.BNP <- as.data.frame(coef(EX.BNP.garch11.fit)[5])

# CAPM vs GARCH ratios side by side in one comparison table.
ind.capm.BNP<- cbind(sharpe.ratio.BNP,treynor.ratio.BNP,jensen.ratio.BNP)
colnames(ind.capm.BNP) <- c("Sharpe", "Treynor", "Jensen")
ind.garch.BNP<- cbind(sharpe.garch.BNP,treynor.garch.BNP,jensen.garch.BNP)
colnames(ind.garch.BNP) <- c("Sharpe", "Treynor", "Jensen")
compare.index.BNP <- rbind(ind.capm.BNP,ind.garch.BNP)
rownames(compare.index.BNP)<- c("Ratios CAPM", "Ratios GARCH")
compare.index.BNP
##                   Sharpe     Treynor     Jensen
## Ratios CAPM  0.002477528 0.004461577 0.03096202
## Ratios GARCH 0.004708877 0.006864925 0.07319824

Como se puede observar, las ratios aproximadamente se duplican.

7 Apartado 5: ¿relación de cointegración?

En este apartado comprobaremos si existe alguna relación de cointegración entre los parámetros.

Vamos a calcular los logaritmos de los precios:

# Log prices: take logs of the adjusted-price panel and drop rows with
# missing values in one step (order of log/na.omit is interchangeable).
ldata <- na.omit(log(fdata))

# Individual log-price series used in the cointegration analysis below.
lASML <- ldata$ASML
lBAYN <- ldata$BAYN
lBNP <- ldata$BNP

Vamos a convertir los datos a zoo:

# Coerce the log-price series to plain zoo objects so dynlm() accepts them.
asml<-as.zoo(lASML)
bayn<-as.zoo(lBAYN)
bnp<-as.zoo(lBNP)

7.1 Engle-Granger

7.1.1 ASML-BAYN~1

Primer paso:

# Engle-Granger step 1: cointegrating regression of log(ASML) on log(BAYN).
e1 <- dynlm(asml ~ bayn)
print(summary(e1))
## 
## Time series regression with "zoo" data:
## Start = 2005-01-03, End = 2017-04-07
## 
## Call:
## dynlm(formula = asml ~ bayn)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -0.61288 -0.13936 -0.01707  0.14359  0.54364 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -1.718403   0.030570  -56.21   <2e-16 ***
## bayn         1.319165   0.007674  171.91   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.2198 on 3038 degrees of freedom
## Multiple R-squared:  0.9068, Adjusted R-squared:  0.9068 
## F-statistic: 2.955e+04 on 1 and 3038 DF,  p-value: < 2.2e-16
# Series/ACF/PACF display of the cointegrating-regression residuals.
tsdisplay(e1$residuals)

# z1 = equilibrium errors; if z1 is stationary the pair is cointegrated.
z1 <- e1$residuals

La ACF sugiere que los residuos no son estacionarios. Puede haber una raíz unitaria.

Segundo paso:

Aplicamos la ADF a \(z1\)

  1. Con constante y tendencia
# Engle-Granger step 2: ADF on z1 with constant and trend (BIC lag choice).
z1.df3 <- ur.df(z1, type="trend", selectlags = "BIC")
print(summary(z1.df3))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression trend 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + tt + z.diff.lag)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -0.32684 -0.01357 -0.00058  0.01300  0.19345 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)   
## (Intercept) -1.102e-03  9.597e-04  -1.149  0.25080   
## z.lag.1     -7.026e-03  2.210e-03  -3.179  0.00149 **
## tt           7.906e-07  5.525e-07   1.431  0.15252   
## z.diff.lag  -3.619e-02  1.815e-02  -1.994  0.04621 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.02557 on 3034 degrees of freedom
## Multiple R-squared:  0.004982,   Adjusted R-squared:  0.003998 
## F-statistic: 5.064 on 3 and 3034 DF,  p-value: 0.001688
## 
## 
## Value of test-statistic is: -3.1792 3.4811 5.1979 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau3 -3.96 -3.41 -3.12
## phi2  6.09  4.68  4.03
## phi3  8.27  6.25  5.34
plot(z1.df3)

No podemos rechazar la \(H_0\), por lo que parece que existe la raíz unitaria.

Dado el valor crítico de \(\phi_3\) podemos eliminar la tendencia.

# ADF with drift only (trend dropped after the phi3 test above).
z1.df2 <- ur.df(z1, type="drift", selectlags = "BIC")
print(summary(z1.df2))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression drift 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + z.diff.lag)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -0.32743 -0.01326 -0.00052  0.01291  0.19332 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)   
## (Intercept)  9.991e-05  4.640e-04   0.215  0.82953   
## z.lag.1     -6.114e-03  2.116e-03  -2.889  0.00389 **
## z.diff.lag  -3.642e-02  1.815e-02  -2.007  0.04484 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.02557 on 3035 degrees of freedom
## Multiple R-squared:  0.004311,   Adjusted R-squared:  0.003655 
## F-statistic:  6.57 on 2 and 3035 DF,  p-value: 0.001422
## 
## 
## Value of test-statistic is: -2.8888 4.1962 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau2 -3.43 -2.86 -2.57
## phi1  6.43  4.59  3.78
plot(z1.df2)

Parece que no hay raíz unitaria por lo que existe la cointegración

La constante parece no ser significativa; hay que tener en cuenta que, a falta de constante, los test se vuelven inestables.

Veamos el modelo sin constante:

# ADF without deterministic terms (constant dropped as non-significant).
z1.df1 <- ur.df(z1, type="none", selectlags = "BIC")
print(summary(z1.df1))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression none 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 - 1 + z.diff.lag)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -0.32733 -0.01316 -0.00042  0.01301  0.19342 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)   
## z.lag.1    -0.006114   0.002116  -2.889  0.00389 **
## z.diff.lag -0.036407   0.018145  -2.006  0.04489 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.02557 on 3036 degrees of freedom
## Multiple R-squared:  0.00431,    Adjusted R-squared:  0.003654 
## F-statistic: 6.571 on 2 and 3036 DF,  p-value: 0.001421
## 
## 
## Value of test-statistic is: -2.8894 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau1 -2.58 -1.95 -1.62
plot(z1.df1)

Parece que no hay raíz unitaria por lo que existe la cointegración

7.1.2 ASML-BNP~2

Primer paso:

# Engle-Granger step 1: cointegrating regression of log(ASML) on log(BNP).
e2 <- dynlm(asml ~ bnp)
print(summary(e2))
## 
## Time series regression with "zoo" data:
## Start = 2005-01-03, End = 2017-04-07
## 
## Call:
## dynlm(formula = asml ~ bnp)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.17165 -0.68224 -0.01717  0.65174  1.12240 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  1.40904    0.19035   7.402 1.72e-13 ***
## bnp          0.56535    0.05155  10.968  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.7059 on 3038 degrees of freedom
## Multiple R-squared:  0.03809,    Adjusted R-squared:  0.03777 
## F-statistic: 120.3 on 1 and 3038 DF,  p-value: < 2.2e-16
# Series/ACF/PACF display of the cointegrating-regression residuals.
tsdisplay(e2$residuals)

# z2 = equilibrium errors for the ASML-BNP pair.
z2 <- e2$residuals

La ACF sugiere que los residuos no son estacionarios. Puede haber una raíz unitaria.

Segundo paso:

Aplicamos la ADF a \(z2\)

  1. Con constante y tendencia
# Engle-Granger step 2: ADF on z2 with constant and trend (BIC lag choice).
z2.df3 <- ur.df(z2, type="trend", selectlags = "BIC")
print(summary(z2.df3))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression trend 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + tt + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.221551 -0.010330 -0.000326  0.010472  0.199526 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -9.884e-03  2.993e-03  -3.303 0.000968 ***
## z.lag.1     -8.908e-03  2.428e-03  -3.669 0.000248 ***
## tt           6.983e-06  1.953e-06   3.576 0.000354 ***
## z.diff.lag  -1.612e-03  1.815e-02  -0.089 0.929222    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.02035 on 3034 degrees of freedom
## Multiple R-squared:  0.004454,   Adjusted R-squared:  0.003469 
## F-statistic: 4.524 on 3 and 3034 DF,  p-value: 0.003595
## 
## 
## Value of test-statistic is: -3.6687 5.8123 6.7302 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau3 -3.96 -3.41 -3.12
## phi2  6.09  4.68  4.03
## phi3  8.27  6.25  5.34
plot(z2.df3)

No podemos rechazar la \(H_0\), por lo que parece que existe la raíz unitaria.

Dado el valor crítico de \(\phi_3\) podemos eliminar la tendencia.

# ADF with drift only (trend dropped after the phi3 test above).
z2.df2 <- ur.df(z2, type="drift", selectlags = "BIC")
print(summary(z2.df2))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression drift 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + z.diff.lag)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -0.22308 -0.01032 -0.00045  0.01046  0.20048 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)  
## (Intercept)  0.0007366  0.0003702   1.990   0.0467 *
## z.lag.1     -0.0004287  0.0005245  -0.817   0.4138  
## z.diff.lag  -0.0058534  0.0181478  -0.323   0.7471  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.02039 on 3035 degrees of freedom
## Multiple R-squared:  0.0002569,  Adjusted R-squared:  -0.0004019 
## F-statistic: 0.3899 on 2 and 3035 DF,  p-value: 0.6771
## 
## 
## Value of test-statistic is: -0.8173 2.3146 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau2 -3.43 -2.86 -2.57
## phi1  6.43  4.59  3.78
plot(z2.df2)

Parece que hay raíz unitaria por lo que no existe la cointegración

La constante parece no ser significativa; hay que tener en cuenta que, a falta de constante, los test se vuelven inestables.

Veamos el modelo sin constante:

# ADF without deterministic terms (constant dropped as non-significant).
z2.df1 <- ur.df(z2, type="none", selectlags = "BIC")
print(summary(z2.df1))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression none 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 - 1 + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.222376 -0.009612  0.000294  0.011177  0.201192 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)
## z.lag.1    -0.0004293  0.0005248  -0.818    0.413
## z.diff.lag -0.0045739  0.0181452  -0.252    0.801
## 
## Residual standard error: 0.0204 on 3036 degrees of freedom
## Multiple R-squared:  0.0002433,  Adjusted R-squared:  -0.0004153 
## F-statistic: 0.3694 on 2 and 3036 DF,  p-value: 0.6912
## 
## 
## Value of test-statistic is: -0.818 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau1 -2.58 -1.95 -1.62
plot(z2.df1)

Parece que hay raíz unitaria por lo que no existe la cointegración

7.1.3 BAYN-BNP~3

Primer paso:

# Engle-Granger step 1: cointegrating regression of log(BAYN) on log(BNP).
e3 <- dynlm(bayn ~ bnp)
print(summary(e3))
## 
## Time series regression with "zoo" data:
## Start = 2005-01-03, End = 2017-04-07
## 
## Call:
## dynlm(formula = bayn ~ bnp)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -0.86694 -0.34484 -0.03923  0.44166  0.87529 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  1.29535    0.13152   9.849   <2e-16 ***
## bnp          0.72045    0.03562  20.229   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4877 on 3038 degrees of freedom
## Multiple R-squared:  0.1187, Adjusted R-squared:  0.1184 
## F-statistic: 409.2 on 1 and 3038 DF,  p-value: < 2.2e-16
# Series/ACF/PACF display of the cointegrating-regression residuals.
tsdisplay(e3$residuals)

# z3 = equilibrium errors for the BAYN-BNP pair.
z3 <- e3$residuals

La ACF sugiere que los residuos no son estacionarios. Puede haber una raíz unitaria.

Segundo paso:

Aplicamos la ADF a \(z3\)

  1. Con constante y tendencia
# Engle-Granger step 2: ADF on z3 with constant and trend (BIC lag choice).
z3.df3 <- ur.df(z3, type="trend", selectlags = "BIC")
print(summary(z3.df3))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression trend 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + tt + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.144507 -0.009220 -0.000100  0.009163  0.262314 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)   
## (Intercept) -5.637e-03  2.076e-03  -2.715  0.00667 **
## z.lag.1     -7.925e-03  2.422e-03  -3.272  0.00108 **
## tt           3.981e-06  1.346e-06   2.957  0.00314 **
## z.diff.lag   5.366e-03  1.817e-02   0.295  0.76778   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.01898 on 3034 degrees of freedom
## Multiple R-squared:  0.003633,   Adjusted R-squared:  0.002647 
## F-statistic: 3.687 on 3 and 3034 DF,  p-value: 0.0115
## 
## 
## Value of test-statistic is: -3.2719 4.1738 5.5289 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau3 -3.96 -3.41 -3.12
## phi2  6.09  4.68  4.03
## phi3  8.27  6.25  5.34
plot(z3.df3)

No podemos rechazar la \(H_0\), por lo que parece que existe la raíz unitaria.

Dado el valor crítico de \(\phi_3\) podemos eliminar la tendencia.

# ADF with drift only (trend dropped after the phi3 test above).
z3.df2 <- ur.df(z3, type="drift", selectlags = "BIC")
print(summary(z3.df2))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression drift 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.142952 -0.009028 -0.000065  0.009163  0.264502 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)
## (Intercept)  0.0004166  0.0003448   1.208    0.227
## z.lag.1     -0.0010750  0.0007072  -1.520    0.129
## z.diff.lag   0.0013972  0.0181431   0.077    0.939
## 
## Residual standard error: 0.019 on 3035 degrees of freedom
## Multiple R-squared:  0.000762,   Adjusted R-squared:  0.0001035 
## F-statistic: 1.157 on 2 and 3035 DF,  p-value: 0.3145
## 
## 
## Value of test-statistic is: -1.5201 1.8854 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau2 -3.43 -2.86 -2.57
## phi1  6.43  4.59  3.78
plot(z3.df2)

Parece que hay raíz unitaria por lo que no existe la cointegración

La constante parece no ser significativa; hay que tener en cuenta que, a falta de constante, los tests se vuelven inestables.

Veamos el modelo sin constante:

# ADF test on z3 with neither constant nor trend (type = "none").
z3.df1 <- ur.df(z3, type="none", selectlags = "BIC")
print(summary(z3.df1))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression none 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 - 1 + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.142605 -0.008610  0.000353  0.009583  0.264918 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)
## z.lag.1    -0.0010751  0.0007072  -1.520    0.129
## z.diff.lag  0.0018716  0.0181402   0.103    0.918
## 
## Residual standard error: 0.019 on 3036 degrees of freedom
## Multiple R-squared:  0.0007629,  Adjusted R-squared:  0.0001046 
## F-statistic: 1.159 on 2 and 3036 DF,  p-value: 0.314
## 
## 
## Value of test-statistic is: -1.5201 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau1 -2.58 -1.95 -1.62
plot(z3.df1)

Parece que hay raíz unitaria por lo que no existe la cointegración

7.1.4 BAYN -ASML~4

Primer paso:

# Step 1: static levels regression of bayn on asml (dynlm on zoo series);
# its residuals are the candidate cointegration relationship.
e4 <- dynlm(bayn ~ asml)
print(summary(e4))
## 
## Time series regression with "zoo" data:
## Start = 2005-01-03, End = 2017-04-07
## 
## Call:
## dynlm(formula = bayn ~ asml)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -0.4147 -0.1199  0.0015  0.1290  0.4213 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 1.549408   0.014257   108.7   <2e-16 ***
## asml        0.687391   0.003999   171.9   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.1586 on 3038 degrees of freedom
## Multiple R-squared:  0.9068, Adjusted R-squared:  0.9068 
## F-statistic: 2.955e+04 on 1 and 3038 DF,  p-value: < 2.2e-16
# Inspect the residuals of e4 (series plus ACF/PACF) for non-stationarity.
tsdisplay(e4$residuals)

# Save the residuals as z4 for the unit-root tests that follow.
z4 <- e4$residuals

La ACF sugiere que los residuos no son estacionarios. Puede haber una raíz unitaria.

Segundo paso:

Aplicamos la ADF a \(z4\)

  1. Con constante y tendencia
# ADF test on z4 with constant and linear trend; lag length selected by BIC.
z4.df3 <- ur.df(z4, type="trend", selectlags = "BIC")
print(summary(z4.df3))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression trend 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + tt + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.136194 -0.009377  0.000428  0.009723  0.248213 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)   
## (Intercept)  2.920e-04  6.802e-04   0.429  0.66772   
## z.lag.1     -6.631e-03  2.147e-03  -3.089  0.00203 **
## tt          -2.006e-07  3.875e-07  -0.518  0.60476   
## z.diff.lag  -3.875e-02  1.814e-02  -2.136  0.03280 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.01873 on 3034 degrees of freedom
## Multiple R-squared:  0.004994,   Adjusted R-squared:  0.00401 
## F-statistic: 5.076 on 3 and 3034 DF,  p-value: 0.00166
## 
## 
## Value of test-statistic is: -3.0888 3.2828 4.9235 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau3 -3.96 -3.41 -3.12
## phi2  6.09  4.68  4.03
## phi3  8.27  6.25  5.34
plot(z4.df3)

NO podemos rechazar la \(H_0\), por lo que parece que existe una raíz unitaria.

Dado el valor crítico de \(\phi_3\) podemos eliminar la tendencia.

# ADF test on z4 with drift only, after the trend was found unnecessary.
z4.df2 <- ur.df(z4, type="drift", selectlags = "BIC")
print(summary(z4.df2))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression drift 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.136136 -0.009342  0.000387  0.009757  0.248276 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)   
## (Intercept) -1.297e-05  3.398e-04  -0.038  0.96955   
## z.lag.1     -6.644e-03  2.146e-03  -3.095  0.00198 **
## z.diff.lag  -3.865e-02  1.814e-02  -2.131  0.03319 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.01873 on 3035 degrees of freedom
## Multiple R-squared:  0.004906,   Adjusted R-squared:  0.00425 
## F-statistic: 7.482 on 2 and 3035 DF,  p-value: 0.0005738
## 
## 
## Value of test-statistic is: -3.0954 4.7915 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau2 -3.43 -2.86 -2.57
## phi1  6.43  4.59  3.78
plot(z4.df2)

Parece que no hay raíz unitaria por lo que existe la cointegración al igual que regresando inversamente en el primer caso.

La constante parece no ser significativa; hay que tener en cuenta que, a falta de constante, los tests se vuelven inestables.

Veamos el modelo sin constante:

# ADF test on z4 without constant or trend.
z4.df1 <- ur.df(z4, type="none", selectlags = "BIC")
print(summary(z4.df1))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression none 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 - 1 + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.136149 -0.009355  0.000374  0.009744  0.248263 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)   
## z.lag.1    -0.006644   0.002146  -3.096  0.00198 **
## z.diff.lag -0.038650   0.018137  -2.131  0.03317 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.01872 on 3036 degrees of freedom
## Multiple R-squared:  0.004906,   Adjusted R-squared:  0.004251 
## F-statistic: 7.484 on 2 and 3036 DF,  p-value: 0.0005724
## 
## 
## Value of test-statistic is: -3.0959 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau1 -2.58 -1.95 -1.62
plot(z4.df1)

Parece que no hay raíz unitaria por lo que existe la cointegración

7.1.5 BNP -ASML~5

Primer paso:

# Step 1: static levels regression of bnp on asml; residuals are the
# candidate cointegration relationship.
e5 <- dynlm(bnp ~ asml)
print(summary(e5))
## 
## Time series regression with "zoo" data:
## Start = 2005-01-03, End = 2017-04-07
## 
## Call:
## dynlm(formula = bnp ~ asml)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -0.91776 -0.10695  0.03631  0.16570  0.46131 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 3.449164   0.021901  157.49   <2e-16 ***
## asml        0.067374   0.006143   10.97   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.2437 on 3038 degrees of freedom
## Multiple R-squared:  0.03809,    Adjusted R-squared:  0.03777 
## F-statistic: 120.3 on 1 and 3038 DF,  p-value: < 2.2e-16
# Inspect the residuals of e5 (series plus ACF/PACF) for non-stationarity.
tsdisplay(e5$residuals)

# Save the residuals as z5 for the unit-root tests that follow.
z5 <- e5$residuals

La ACF sugiere que los residuos no son estacionarios. Puede haber una raíz unitaria.

Segundo paso:

Aplicamos la ADF a \(z5\)

  1. Con constante y tendencia
# ADF test on z5 with constant and linear trend; lag length selected by BIC.
z5.df3 <- ur.df(z5, type="trend", selectlags = "BIC")
print(summary(z5.df3))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression trend 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + tt + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.190704 -0.011440  0.000412  0.011774  0.180697 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)   
## (Intercept)  2.641e-04  9.149e-04   0.289  0.77289   
## z.lag.1     -5.332e-03  1.881e-03  -2.835  0.00461 **
## tt          -6.868e-08  5.217e-07  -0.132  0.89527   
## z.diff.lag   1.860e-02  1.815e-02   1.025  0.30566   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.02513 on 3034 degrees of freedom
## Multiple R-squared:  0.002897,   Adjusted R-squared:  0.001911 
## F-statistic: 2.939 on 3 and 3034 DF,  p-value: 0.032
## 
## 
## Value of test-statistic is: -2.8351 2.7242 4.0246 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau3 -3.96 -3.41 -3.12
## phi2  6.09  4.68  4.03
## phi3  8.27  6.25  5.34
plot(z5.df3)

NO podemos rechazar la \(H_0\), por lo que parece que existe una raíz unitaria.

Dado el valor crítico de \(\phi_3\) podemos eliminar la tendencia.

# ADF test on z5 with drift only, after dropping the trend term.
z5.df2 <- ur.df(z5, type="drift", selectlags = "BIC")
print(summary(z5.df2))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression drift 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.190661 -0.011482  0.000437  0.011788  0.180748 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)   
## (Intercept)  0.0001596  0.0004558   0.350  0.72621   
## z.lag.1     -0.0053115  0.0018739  -2.835  0.00462 **
## z.diff.lag   0.0185846  0.0181505   1.024  0.30596   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.02513 on 3035 degrees of freedom
## Multiple R-squared:  0.002892,   Adjusted R-squared:  0.002235 
## F-statistic: 4.401 on 2 and 3035 DF,  p-value: 0.01235
## 
## 
## Value of test-statistic is: -2.8345 4.079 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau2 -3.43 -2.86 -2.57
## phi1  6.43  4.59  3.78
plot(z5.df2)

Parece que hay raíz unitaria por lo que no existe la cointegración

La constante parece no ser significativa; hay que tener en cuenta que, a falta de constante, los tests se vuelven inestables.

Veamos el modelo sin constante:

# ADF test on z5 without constant or trend.
z5.df1 <- ur.df(z5, type="none", selectlags = "BIC")
print(summary(z5.df1))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression none 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 - 1 + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.190503 -0.011322  0.000597  0.011948  0.180907 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)   
## z.lag.1    -0.005312   0.001874  -2.835  0.00461 **
## z.diff.lag  0.018626   0.018147   1.026  0.30479   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.02512 on 3036 degrees of freedom
## Multiple R-squared:  0.002893,   Adjusted R-squared:  0.002236 
## F-statistic: 4.405 on 2 and 3036 DF,  p-value: 0.0123
## 
## 
## Value of test-statistic is: -2.8351 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau1 -2.58 -1.95 -1.62
plot(z5.df1)

Parece que no hay raíz unitaria (el estadístico −2.8351 es inferior al valor crítico tau1 al 5%, −1.95, por lo que rechazamos la \(H_0\)), de modo que existe la cointegración.

Aunque hay que subrayar que este resultado no es muy fiable por la falta de estabilidad en la prueba.

7.1.6 BNP -BAYN~6

Primer paso:

# Step 1: static levels regression of bnp on bayn; residuals are the
# candidate cointegration relationship.
e6 <- dynlm(bnp ~ bayn)
print(summary(e6))
## 
## Time series regression with "zoo" data:
## Start = 2005-01-03, End = 2017-04-07
## 
## Call:
## dynlm(formula = bnp ~ bayn)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -0.90946 -0.10409  0.02965  0.12688  0.45185 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 3.033638   0.032449   93.49   <2e-16 ***
## bayn        0.164767   0.008145   20.23   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.2333 on 3038 degrees of freedom
## Multiple R-squared:  0.1187, Adjusted R-squared:  0.1184 
## F-statistic: 409.2 on 1 and 3038 DF,  p-value: < 2.2e-16
# Inspect the residuals of e6 (series plus ACF/PACF) for non-stationarity.
tsdisplay(e6$residuals)

# Save the residuals as z6 for the unit-root tests that follow.
z6 <- e6$residuals

La ACF sugiere que los residuos no son estacionarios. Puede haber una raíz unitaria.

Segundo paso:

Aplicamos la ADF a \(z6\)

  1. Con constante y tendencia
# ADF test on z6 with constant and linear trend; lag length selected by BIC.
z6.df3 <- ur.df(z6, type="trend", selectlags = "BIC")
print(summary(z6.df3))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression trend 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + tt + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.192063 -0.011269  0.000059  0.011536  0.179795 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)   
## (Intercept)  5.096e-04  9.027e-04   0.565  0.57245   
## z.lag.1     -5.660e-03  1.950e-03  -2.903  0.00372 **
## tt          -2.546e-07  5.176e-07  -0.492  0.62287   
## z.diff.lag   2.347e-02  1.815e-02   1.293  0.19612   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.02436 on 3034 degrees of freedom
## Multiple R-squared:  0.0032, Adjusted R-squared:  0.002215 
## F-statistic: 3.247 on 3 and 3034 DF,  p-value: 0.02103
## 
## 
## Value of test-statistic is: -2.903 2.8449 4.2287 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau3 -3.96 -3.41 -3.12
## phi2  6.09  4.68  4.03
## phi3  8.27  6.25  5.34
plot(z6.df3)

NO podemos rechazar la \(H_0\), por lo que parece que existe una raíz unitaria.

Dado el valor crítico de \(\phi_3\) podemos eliminar la tendencia.

# ADF test on z6 with drift only, after dropping the trend term.
z6.df2 <- ur.df(z6, type="drift", selectlags = "BIC")
print(summary(z6.df2))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression drift 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 + 1 + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.191870 -0.011348  0.000089  0.011422  0.180088 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)   
## (Intercept)  0.0001225  0.0004420   0.277  0.78173   
## z.lag.1     -0.0054412  0.0018981  -2.867  0.00418 **
## z.diff.lag   0.0233333  0.0181493   1.286  0.19867   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.02436 on 3035 degrees of freedom
## Multiple R-squared:  0.003121,   Adjusted R-squared:  0.002464 
## F-statistic: 4.751 on 2 and 3035 DF,  p-value: 0.008708
## 
## 
## Value of test-statistic is: -2.8666 4.1475 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau2 -3.43 -2.86 -2.57
## phi1  6.43  4.59  3.78
plot(z6.df2)

Parece que no hay raíz unitaria por lo que existe la cointegración

La constante parece no ser significativa; hay que tener en cuenta que, a falta de constante, los tests se vuelven inestables.

Veamos el modelo sin constante:

# ADF test on z6 without constant or trend.
z6.df1 <- ur.df(z6, type="none", selectlags = "BIC")
print(summary(z6.df1))
## 
## ############################################### 
## # Augmented Dickey-Fuller Test Unit Root Test # 
## ############################################### 
## 
## Test regression none 
## 
## 
## Call:
## lm(formula = z.diff ~ z.lag.1 - 1 + z.diff.lag)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -0.191748 -0.011225  0.000212  0.011544  0.180210 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)   
## z.lag.1    -0.005441   0.001898  -2.867  0.00417 **
## z.diff.lag  0.023360   0.018146   1.287  0.19809   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.02436 on 3036 degrees of freedom
## Multiple R-squared:  0.003122,   Adjusted R-squared:  0.002466 
## F-statistic: 4.754 on 2 and 3036 DF,  p-value: 0.008678
## 
## 
## Value of test-statistic is: -2.8672 
## 
## Critical values for test statistics: 
##       1pct  5pct 10pct
## tau1 -2.58 -1.95 -1.62
plot(z6.df1)

Parece que no hay raíz unitaria por lo que existe la cointegración